Dataset schema (five columns per row):

  code                     string  (lengths 81 to 54k characters)
  code_codestyle           int64   (values 0 to 721)
  style_context            string  (lengths 91 to 41.9k characters)
  style_context_codestyle  int64   (values 0 to 699)
  label                    int64   (values 0 or 1)

Each row below lists these five fields in order: a code sample, its integer style id, a style-context code sample, the context's integer style id, and a binary label.
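Given this schema, each row reads as a (code, style_context) pair plus a binary label, which suggests a style-match classification task. Below is a minimal sketch of how such a dataset might be loaded and inspected with the Hugging Face `datasets` library; the dataset path "user/code-style-pairs" is a hypothetical placeholder, since the source does not name the dataset.

# Minimal sketch, assuming these rows are hosted as a Hugging Face dataset.
# "user/code-style-pairs" is a hypothetical path; the source does not name the dataset.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:80])                # Python source string, 81 to 54k characters
print(row["code_codestyle"])           # integer style id in [0, 721]
print(row["style_context"][:80])       # a second snippet providing style context
print(row["style_context_codestyle"])  # integer style id in [0, 699]
print(row["label"])                    # 0 or 1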
'''simple docstring''' import math from numpy import inf from scipy.integrate import quad def _snake_case ( lowercase ) -> float: if num <= 0: raise ValueError("""math domain error""" ) return quad(__UpperCAmelCase , 0 , __UpperCAmelCase , args=(__UpperCAmelCase) )[0] def _snake_case ( lowercase , lowercase ) -> float: return math.pow(__UpperCAmelCase , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
713
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' super().__init__() self.register_modules( vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , ) def __lowerCamelCase ( self , __UpperCamelCase = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __a : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' self.enable_attention_slicing(__UpperCamelCase ) @torch.no_grad() def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' if isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Union[str, Any] = 1 elif isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Tuple = len(__UpperCamelCase ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__UpperCamelCase )}.""" ) # get prompt text embeddings __a : Tuple = self.tokenizer( __UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) __a : Union[str, Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) __a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: __a : int = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __a , __a , __a : Union[str, Any] = text_embeddings.shape __a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 ) __a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __a : Any = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __a : List[str] if negative_prompt is None: __a : Optional[Any] = [""""""] elif type(__UpperCamelCase ) is not type(__UpperCamelCase ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !=""" f""" {type(__UpperCamelCase )}.""" ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Any = [negative_prompt] elif batch_size != len(__UpperCamelCase ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" """ the batch size of `prompt`.""" ) else: __a : Tuple = negative_prompt __a : Any = text_input_ids.shape[-1] __a : List[str] = self.tokenizer( __UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , ) __a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __a : List[str] = uncond_embeddings.shape[1] __a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 ) __a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. __a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) __a : int = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __a : Any = torch.randn( __UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device ) __a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to( self.device ) else: __a : Optional[int] = torch.randn( __UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) __a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) else: if latents_reference.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __a : Optional[Any] = latents_reference.to(self.device ) __a : str = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images __a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2 __a : int = (latents_shape[2] - latents_shape_reference[2]) // 2 __a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx __a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy __a : Optional[Any] = 0 if dx < 0 else dx __a : Optional[Any] = 0 if dy < 0 else dy __a : Optional[int] = max(-dx , 0 ) __a : Optional[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() __a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(__UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __a : Dict = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __a : Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __a : Optional[Any] = {} if accepts_eta: __a : Union[str, Any] = eta for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance __a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase ) # predict the noise residual __a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample # perform guidance if do_classifier_free_guidance: __a , __a : List[str] = noise_pred.chunk(2 ) __a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents __a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample __a : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: __a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to( self.device ) __a , __a : int = self.safety_checker( images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: __a : Optional[int] = None if output_type == "pil": __a : str = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
697
0
'''simple docstring''' from decimal import Decimal, getcontext from math import ceil, factorial def _snake_case ( lowercase ) -> Tuple: if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) __a : Optional[Any] = precision __a : List[Any] = ceil(precision / 1_4 ) __a : List[Any] = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt() __a : Any = 1 __a : Any = 1_3_5_9_1_4_0_9 __a : Optional[Any] = Decimal(lowerCAmelCase__ ) for k in range(1 , lowerCAmelCase__ ): __a : List[str] = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowerCAmelCase__ ) ** 3) linear_term += 5_4_5_1_4_0_1_3_4 exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": __SCREAMING_SNAKE_CASE : int = 50 print(f'''The first {n} digits of pi is: {pi(n)}''')
714
'''simple docstring''' import numpy as np from PIL import Image def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray: __a : Any = np.array(lowercase ) if arr.shape[0] != arr.shape[1]: raise ValueError("""The input array is not a square matrix""" ) __a : Union[str, Any] = 0 __a : Dict = 0 __a : Optional[Any] = 0 __a : Tuple = 0 # compute the shape of the output matrix __a : Optional[int] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape __a : int = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix __a : Optional[Any] = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __a : Optional[Any] = 0 __a : str = 0 return updated_arr def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray: __a : int = np.array(lowercase ) if arr.shape[0] != arr.shape[1]: raise ValueError("""The input array is not a square matrix""" ) __a : int = 0 __a : Optional[Any] = 0 __a : str = 0 __a : List[Any] = 0 # compute the shape of the output matrix __a : int = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape __a : Optional[int] = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix __a : Any = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __a : str = 0 __a : List[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='avgpooling', verbose=True) # Loading the image __SCREAMING_SNAKE_CASE : str = Image.open('path_to_image') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
697
0
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : int = "▁" __SCREAMING_SNAKE_CASE : Tuple = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} __SCREAMING_SNAKE_CASE : str = { "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } __SCREAMING_SNAKE_CASE : Any = {"vinai/bartpho-syllable": 1_024} class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "attention_mask"] def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' __a : List[Any] = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token __a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) __a : Union[str, Any] = vocab_file __a : Optional[int] = monolingual_vocab_file __a : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCamelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility __a : str = {} __a : str = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(_UpperCamelCase ) not in self.fairseq_tokens_to_ids: __a : Union[str, Any] = cnt cnt += 1 with open(_UpperCamelCase , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): __a : Union[str, Any] = line.strip().split()[0] __a : Union[str, Any] = len(self.fairseq_tokens_to_ids ) if str(_UpperCamelCase ) not in self.fairseq_tokens_to_ids: __a : List[str] = len(self.fairseq_tokens_to_ids ) __a : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): '''simple docstring''' __a : int = self.__dict__.copy() __a : Any = None __a : str = self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCamelCase ): '''simple docstring''' __a : Union[str, Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __a : Optional[Any] = {} __a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a : Dict = [self.cls_token_id] __a : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1] def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' __a : Optional[Any] = [self.sep_token_id] __a : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __lowerCamelCase ( self ): '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' return self.fairseq_ids_to_tokens[index] def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' __a : Dict = """""".join(_UpperCamelCase ).replace(_UpperCamelCase , """ """ ).strip() return out_string def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' if not os.path.isdir(_UpperCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __a : Dict = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __a : str = os.path.join( _UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , """wb""" ) as fi: __a : int = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( _UpperCamelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(_UpperCamelCase , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"""{str(_UpperCamelCase )} \n""" ) return out_vocab_file, out_monolingual_vocab_file
715
'''simple docstring''' import qiskit def _snake_case ( lowercase , lowercase ) -> qiskit.result.counts.Counts: __a : Any = qiskit.Aer.get_backend("""aer_simulator""" ) # Create a Quantum Circuit acting on the q register __a : str = qiskit.QuantumCircuit(lowercase , lowercase ) # Map the quantum measurement to the classical bits circuit.measure([0] , [0] ) # Execute the circuit on the simulator __a : Any = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(lowercase ) if __name__ == "__main__": print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
697
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : str = { 'facebook/data2vec-vision-base-ft': ( 'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json' ), } class SCREAMING_SNAKE_CASE__ ( a__ ): lowercase__ = "data2vec-vision" def __init__( self , __UpperCamelCase=768 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3072 , __UpperCamelCase="gelu" , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-12 , __UpperCamelCase=224 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=True , __UpperCamelCase=[3, 5, 7, 11] , __UpperCamelCase=[1, 2, 3, 6] , __UpperCamelCase=True , __UpperCamelCase=0.4 , __UpperCamelCase=256 , __UpperCamelCase=1 , __UpperCamelCase=False , __UpperCamelCase=255 , **__UpperCamelCase , ): '''simple docstring''' super().__init__(**_A ) __a : Tuple = hidden_size __a : Dict = num_hidden_layers __a : List[Any] = num_attention_heads __a : Any = intermediate_size __a : Optional[int] = hidden_act __a : Optional[Any] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : List[str] = initializer_range __a : Tuple = layer_norm_eps __a : Union[str, Any] = image_size __a : Any = patch_size __a : Union[str, Any] = num_channels __a : Optional[Any] = use_mask_token __a : List[str] = use_absolute_position_embeddings __a : int = use_relative_position_bias __a : int = use_shared_relative_position_bias __a : Optional[Any] = layer_scale_init_value __a : List[str] = drop_path_rate __a : Optional[Any] = use_mean_pooling # decode head attributes (semantic segmentation) __a : int = out_indices __a : Optional[Any] = pool_scales # auxiliary head attributes (semantic segmentation) __a : List[str] = use_auxiliary_head __a : Any = auxiliary_loss_weight __a : Union[str, Any] = auxiliary_channels __a : Optional[Any] = auxiliary_num_convs __a : Tuple = auxiliary_concat_input __a : Dict = semantic_loss_ignore_index class SCREAMING_SNAKE_CASE__ ( a__ ): lowercase__ = version.parse("1.11" ) @property def __lowerCamelCase ( self ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __lowerCamelCase ( self ): '''simple docstring''' return 1E-4
716
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Any = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k', 'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v', 'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q', 'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u', 'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v', 'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out', 'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos', 'self_attn.rotary_emb': 'encoder.embed_positions', 'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm', 'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1', 'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2', 'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv', 'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm', 'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm', 'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense', 'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense', 'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm', 'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense', 'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense', 'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __SCREAMING_SNAKE_CASE : Optional[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: for attribute in key.split(""".""" ): __a : str = getattr(lowercase , lowercase ) if weight_type is not None: __a : Dict = getattr(lowercase , lowercase ).shape else: __a : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a : Any = value elif weight_type == "weight_g": __a : int = value elif weight_type == "weight_v": __a : int = value elif weight_type == "bias": __a : List[Any] = value elif weight_type == "running_mean": __a : Union[str, Any] = value elif weight_type == "running_var": __a : Tuple = value elif weight_type == "num_batches_tracked": __a : Optional[int] = value elif weight_type == "inv_freq": __a : List[str] = value else: __a : List[str] = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( lowercase , lowercase , lowercase ) -> Dict: __a : Dict = [] __a : Dict = fairseq_model.state_dict() __a : Tuple = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __a : int = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , ) __a : List[Any] = True else: for key, mapped_key in MAPPING.items(): __a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __a : str = True if "*" in mapped_key: __a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2] __a : List[Any] = mapped_key.replace("""*""" , lowercase ) if "pos_bias_u" in name: __a : Union[str, Any] = None elif "pos_bias_v" in name: __a : List[Any] = None elif "weight_g" in name: __a : List[Any] = """weight_g""" elif "weight_v" in name: __a : List[Any] = """weight_v""" elif "bias" in name: __a : Optional[int] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __a : str = """weight""" elif "running_mean" in name: __a : List[str] = """running_mean""" elif "inv_freq" in name: __a : Dict = """inv_freq""" elif "running_var" in name: __a : Union[str, Any] = """running_var""" elif "num_batches_tracked" in name: __a : int = """num_batches_tracked""" else: __a : Optional[int] = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]: __a : Optional[Any] = full_name.split("""conv_layers.""" )[-1] __a : Union[str, Any] = name.split(""".""" ) __a : Optional[Any] = int(items[0] ) __a : int = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a : Dict = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __a : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __a : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase ) @torch.no_grad() def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]: if config_path is not None: __a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" ) else: __a : Optional[int] = WavaVecaConformerConfig() if "rope" in checkpoint_path: __a : Optional[Any] = """rotary""" if is_finetuned: if dict_path: __a : List[Any] = Dictionary.load(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __a : int = target_dict.pad_index __a : List[str] = target_dict.bos_index __a : str = target_dict.eos_index __a : Dict = len(target_dict.symbols ) __a : Any = os.path.join(lowercase , """vocab.json""" ) if not os.path.isdir(lowercase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) __a : Dict = target_dict.indices # fairseq has the <pad> and <s> switched __a : Optional[Any] = 0 __a : List[Any] = 1 with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowercase , lowercase ) __a : int = WavaVecaCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , ) __a : Optional[int] = True if config.feat_extract_norm == """layer""" else False __a : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , ) __a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) __a : List[str] = WavaVecaConformerForCTC(lowercase ) else: __a : Optional[int] = WavaVecaConformerForPreTraining(lowercase ) if is_finetuned: __a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" ) __a : Tuple = fairseq.tasks.setup_task(lowercase ) __a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase ) __a : Any = model[0].eval() recursively_load_weights(lowercase , lowercase , not is_finetuned ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
697
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[str] = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class SCREAMING_SNAKE_CASE__ ( _A ): lowercase__ = "deformable_detr" lowercase__ = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=300 , __UpperCamelCase=1024 , __UpperCamelCase=6 , __UpperCamelCase=1024 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=1024 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=256 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=False , __UpperCamelCase=300 , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , __UpperCamelCase=0.2_5 , __UpperCamelCase=False , **__UpperCamelCase , ): '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) __a : Dict = CONFIG_MAPPING["resnet"](out_features=["""stage4"""] ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): __a : int = backbone_config.get("""model_type""" ) __a : Optional[int] = CONFIG_MAPPING[backbone_model_type] __a : Dict = config_class.from_dict(__lowerCamelCase ) __a : Tuple = use_timm_backbone __a : Optional[int] = backbone_config __a : str = num_channels __a : Tuple = num_queries __a : str = max_position_embeddings __a : Tuple = d_model __a : List[Any] = encoder_ffn_dim __a : Optional[int] = encoder_layers __a : Optional[Any] = encoder_attention_heads __a : List[str] = decoder_ffn_dim __a : Optional[int] = decoder_layers __a : Union[str, Any] = decoder_attention_heads __a : List[str] = dropout __a : List[Any] = attention_dropout __a : int = activation_dropout __a : Dict = activation_function __a : Dict = init_std __a : int = init_xavier_std __a : Dict = encoder_layerdrop __a : str = auxiliary_loss __a : List[Any] = position_embedding_type __a : List[str] = backbone __a : Optional[int] = use_pretrained_backbone __a : Optional[Any] = dilation # deformable attributes __a : Dict = num_feature_levels __a : Optional[Any] = encoder_n_points __a : List[Any] = decoder_n_points __a : Tuple = two_stage __a : Optional[int] = two_stage_num_proposals __a : str = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError("""If two_stage is True, with_box_refine must be True.""" ) # Hungarian matcher __a : str = class_cost __a : Union[str, Any] = bbox_cost __a : List[Any] = giou_cost # Loss coefficients __a : Optional[int] = mask_loss_coefficient __a : int = dice_loss_coefficient __a : Optional[Any] = bbox_loss_coefficient __a : int = giou_loss_coefficient __a : str = eos_coefficient __a : Dict = focal_alpha __a : List[Any] = disable_custom_kernels super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase ) @property def __lowerCamelCase ( self ): '''simple docstring''' return self.encoder_attention_heads @property def __lowerCamelCase ( self ): '''simple docstring''' return self.d_model def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __a : Any = self.backbone_config.to_dict() __a : Any = self.__class__.model_type return output
717
'''simple docstring''' import warnings from functools import wraps from typing import Callable def _snake_case ( lowercase ) -> Callable: @wraps(lowercase ) def _inner_fn(*lowercase , **lowercase ): warnings.warn( (F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , ) return fn(*lowercase , **lowercase ) return _inner_fn
697
0
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=10 , __UpperCamelCase=3 , __UpperCamelCase=32 * 4 , __UpperCamelCase=32 * 6 , __UpperCamelCase=4 , __UpperCamelCase=32 , ): '''simple docstring''' __a : str = parent __a : Union[str, Any] = batch_size __a : Optional[Any] = is_training __a : List[Any] = use_auxiliary_loss __a : Dict = num_queries __a : Optional[int] = num_channels __a : List[str] = min_size __a : Dict = max_size __a : Dict = num_labels __a : List[str] = mask_feature_size def __lowerCamelCase ( self ): '''simple docstring''' __a : int = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowerCamelCase_ ) __a : Union[str, Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase_ ) __a : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase_ ) > 0.5 ).float() __a : str = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase_ ) > 0.5).long() __a : Any = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __lowerCamelCase ( self ): '''simple docstring''' return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = self.prepare_config_and_inputs() __a : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Tuple = output.encoder_hidden_states __a : int = output.pixel_decoder_hidden_states __a : str = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowerCamelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase_ ) , config.decoder_config.decoder_layers ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ): '''simple docstring''' with torch.no_grad(): __a : Tuple = MaskFormerModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() __a : List[str] = model(pixel_values=lowerCamelCase_ , pixel_mask=lowerCamelCase_ ) __a : Dict = model(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowerCamelCase_ , lowerCamelCase_ ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Union[str, Any] = MaskFormerForInstanceSegmentation(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() def comm_check_on_output(__UpperCamelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __a : List[Any] = model(pixel_values=lowerCamelCase_ , pixel_mask=lowerCamelCase_ ) __a : List[Any] = model(lowerCamelCase_ ) comm_check_on_output(lowerCamelCase_ ) __a : List[str] = model( pixel_values=lowerCamelCase_ , pixel_mask=lowerCamelCase_ , mask_labels=lowerCamelCase_ , class_labels=lowerCamelCase_ ) comm_check_on_output(lowerCamelCase_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): lowercase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () lowercase__ = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = MaskFormerModelTester(self ) __a : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(lowerCamelCase_ , **lowerCamelCase_ , output_hidden_states=lowerCamelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCamelCase_ ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Union[str, Any] = model_class(lowerCamelCase_ ) __a : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : int = [*signature.parameters.keys()] __a : Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in ["facebook/maskformer-swin-small-coco"]: __a : Optional[int] = MaskFormerModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = (self.model_tester.min_size,) * 2 __a : Optional[int] = { '''pixel_values''': torch.randn((2, 3, *size) , device=lowerCamelCase_ ), '''mask_labels''': torch.randn((2, 10, *size) , device=lowerCamelCase_ ), '''class_labels''': torch.zeros(2 , 10 , device=lowerCamelCase_ ).long(), } __a : Dict = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCamelCase_ ) __a : Dict = model(**lowerCamelCase_ ) self.assertTrue(outputs.loss is not None ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(lowerCamelCase_ , **lowerCamelCase_ , output_hidden_states=lowerCamelCase_ ) def __lowerCamelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Union[str, Any] = model_class(lowerCamelCase_ ).to(lowerCamelCase_ ) __a : Dict = model(**lowerCamelCase_ , output_attentions=lowerCamelCase_ ) self.assertTrue(outputs.attentions is not None ) def __lowerCamelCase ( self ): '''simple docstring''' if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss __a : Union[str, Any] = self.all_model_classes[1] __a : List[Any] = self.model_tester.prepare_config_and_inputs() __a : Optional[Any] = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.train() __a : Tuple = model(lowerCamelCase_ , mask_labels=lowerCamelCase_ , class_labels=lowerCamelCase_ ).loss loss.backward() def __lowerCamelCase ( self ): '''simple docstring''' __a : int = self.all_model_classes[1] __a : List[Any] = self.model_tester.prepare_config_and_inputs() __a : Tuple = True __a : Dict = True __a : Any = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.train() __a : List[str] = model(lowerCamelCase_ , mask_labels=lowerCamelCase_ , class_labels=lowerCamelCase_ ) __a : Dict = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __a : Optional[int] = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't __a : int = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __a : List[str] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowerCamelCase_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __SCREAMING_SNAKE_CASE : Dict = 1E-4 def _snake_case ( ) -> Tuple: __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def __lowerCamelCase ( self ): '''simple docstring''' return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(lowerCamelCase_ ) __a : List[str] = self.default_image_processor __a : List[Any] = prepare_img() __a : Optional[int] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) __a : Union[str, Any] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase_ , (1, 3, 800, 1088) ) with torch.no_grad(): __a : Dict = model(**lowerCamelCase_ ) __a : List[str] = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(lowerCamelCase_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) __a : Any = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(lowerCamelCase_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) __a : Union[str, Any] = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(lowerCamelCase_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(lowerCamelCase_ ) .eval() ) __a : int = self.default_image_processor __a : List[str] = prepare_img() __a : Any = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) __a : Dict = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase_ , (1, 3, 800, 1088) ) with torch.no_grad(): __a : str = model(**lowerCamelCase_ ) # masks_queries_logits __a : int = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) __a : List[str] = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] __a : int = torch.tensor(lowerCamelCase_ ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) # class_queries_logits __a : int = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) __a : Tuple = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(lowerCamelCase_ ) .eval() ) __a : int = self.default_image_processor __a : Optional[Any] = prepare_img() __a : List[str] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) __a : Optional[int] = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase_ , (1, 3, 800, 1088) ) with torch.no_grad(): __a : Tuple = model(**lowerCamelCase_ ) # masks_queries_logits __a : Tuple = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) __a : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -10.7711]] __a : Tuple = torch.tensor(lowerCamelCase_ ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) # class_queries_logits __a : int = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) __a : Union[str, Any] = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(lowerCamelCase_ ) .eval() ) __a : Optional[int] = self.default_image_processor __a : List[str] = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , ) __a : List[Any] = inputs['''pixel_values'''].to(lowerCamelCase_ ) __a : Any = [el.to(lowerCamelCase_ ) for el in inputs['''mask_labels''']] __a : Tuple = [el.to(lowerCamelCase_ ) for el in inputs['''class_labels''']] with torch.no_grad(): __a : int = model(**lowerCamelCase_ ) self.assertTrue(outputs.loss is not None )
718
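A minimal, self-contained inference sketch for the checkpoint exercised in the tests above; it reuses the test fixture path, and the calls are standard transformers API, but this is a sketch rather than part of the test file:

import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")  # pads H/W up to multiples of 32

with torch.no_grad():
    outputs = model(**inputs)

# (batch, num_queries, num_labels + 1) and (batch, num_queries, H/4, W/4)
print(outputs.class_queries_logits.shape, outputs.masks_queries_logits.shape)

# combine query masks and classes into a per-pixel semantic map at the original resolution
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]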
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = ["input_features", "attention_mask"] def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ): '''simple docstring''' super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase ) __a : List[str] = feature_size __a : List[str] = sampling_rate __a : int = padding_value __a : Any = hop_length __a : int = win_length __a : Tuple = frame_signal_scale __a : Union[str, Any] = preemphasis_coeff __a : List[str] = mel_floor __a : Union[str, Any] = normalize_means __a : Optional[Any] = normalize_vars __a : Optional[Any] = win_function __a : Union[str, Any] = return_attention_mask __a : List[Any] = win_length * sampling_rate // 1000 __a : List[Any] = hop_length * sampling_rate // 1000 __a : Optional[Any] = optimal_fft_length(self.sample_size ) __a : Any = (self.n_fft // 2) + 1 def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' if self.win_function == "hamming_window": __a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase ) else: __a : Dict = window_function(window_length=self.sample_size , name=self.win_function ) __a : Optional[Any] = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) __a : Any = spectrogram( one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , ) return msfc_features.T def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if self.normalize_means: __a : int = x[:input_length].mean(axis=0 ) __a : str = np.subtract(__UpperCamelCase , __UpperCamelCase ) if self.normalize_vars: __a : Dict = x[:input_length].std(axis=0 ) __a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase ) if input_length < x.shape[0]: __a : Union[str, Any] = padding_value # make sure array is in float32 __a : Any = x.astype(np.floataa ) return x def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' __a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )] def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , 
__UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) __a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __a : Tuple = is_batched_numpy or ( isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ): __a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa ) elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __a : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __a : Any = [raw_speech] # extract fbank features __a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech] # convert into correct format for padding __a : Optional[Any] = BatchFeature({"""input_features""": features} ) __a : Any = self.pad( __UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) # make sure list is in array format __a : int = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] , __UpperCamelCase ): __a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features] __a : List[str] = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: __a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: __a : Optional[Any] = ( np.array(__UpperCamelCase , dtype=np.intaa ) if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) __a : int = self.normalize( padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase ) if return_tensors is not None: __a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
697
0
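The extractor above chains transformers.audio_utils primitives; below is a standalone sketch of the same MFSC pipeline with the extractor's default hyperparameters. The one-second dummy waveform is illustrative only:

import numpy as np
from transformers.audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function

sampling_rate = 16000
sample_size = 25 * sampling_rate // 1000    # 25 ms analysis window
sample_stride = 10 * sampling_rate // 1000  # 10 ms hop
n_fft = optimal_fft_length(sample_size)

window = window_function(window_length=sample_size, name="hamming_window", periodic=False)
filters = mel_filter_bank(
    num_frequency_bins=(n_fft // 2) + 1,
    num_mel_filters=80,
    min_frequency=0.0,
    max_frequency=sampling_rate / 2.0,
    sampling_rate=sampling_rate,
)

waveform = np.zeros(sampling_rate, dtype=np.float32)  # one second of silence as dummy input
features = spectrogram(
    waveform,
    window=window,
    frame_length=sample_size,
    hop_length=sample_stride,
    fft_length=n_fft,
    center=False,
    preemphasis=0.97,
    mel_filters=filters,
    mel_floor=1.0,
    log_mel="log",
).T  # transpose to (num_frames, num_mel_bins), as the extractor does
print(features.shape)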
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): lowercase__ = ['pixel_values'] def __init__( self , __UpperCamelCase = True , __UpperCamelCase = 32 , __UpperCamelCase=PILImageResampling.BILINEAR , __UpperCamelCase = True , **__UpperCamelCase , ): '''simple docstring''' __a : Dict = do_resize __a : str = do_rescale __a : Tuple = size_divisor __a : Union[str, Any] = resample super().__init__(**__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase ): '''simple docstring''' __a , __a : Dict = get_image_size(__UpperCamelCase ) # Rounds the height and width down to the closest multiple of size_divisor __a : Optional[Any] = height // size_divisor * size_divisor __a : Dict = width // size_divisor * size_divisor __a : List[str] = resize(__UpperCamelCase , (new_h, new_w) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) return image def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase ): '''simple docstring''' return rescale(image=__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase=None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , ): '''simple docstring''' __a : Any = do_resize if do_resize is not None else self.do_resize __a : Any = do_rescale if do_rescale is not None else self.do_rescale __a : Tuple = size_divisor if size_divisor is not None else self.size_divisor __a : str = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError("""size_divisor is required for resizing""" ) __a : Union[str, Any] = make_list_of_images(__UpperCamelCase ) if not valid_images(__UpperCamelCase ): raise ValueError("""Invalid image(s)""" ) # All transformations expect numpy arrays. __a : Optional[Any] = [to_numpy_array(__UpperCamelCase ) for img in images] if do_resize: __a : Union[str, Any] = [self.resize(__UpperCamelCase , size_divisor=__UpperCamelCase , resample=__UpperCamelCase ) for image in images] if do_rescale: __a : str = [self.rescale(__UpperCamelCase , scale=1 / 255 ) for image in images] __a : str = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images] __a : Optional[Any] = {"""pixel_values""": images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
719
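The resize step above floors each side down to the nearest multiple of size_divisor; a quick illustration of that rounding with arbitrary values:

size_divisor = 32
for height, width in [(480, 640), (500, 333)]:
    new_h = height // size_divisor * size_divisor
    new_w = width // size_divisor * size_divisor
    print((height, width), "->", (new_h, new_w))  # (480, 640) -> (480, 640); (500, 333) -> (480, 320)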
'''Calculate the buoyant force exerted on a submerged object (Archimedes' principle).'''

g = 9.80665  # standard gravity in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible object volume")
    if gravity <= 0:
        raise ValueError("Impossible gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    doctest.testmod()
697
0
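A worked example for the helper above: fresh water (1000 kg/m^3) displacing two litres (0.002 m^3):

buoyancy = archimedes_principle(fluid_density=1000.0, volume=0.002)
print(f"{buoyancy:.4f} N")  # 1000 * 9.80665 * 0.002 = 19.6133 N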
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=4 , ): '''simple docstring''' __a : List[str] = parent __a : Tuple = batch_size __a : Tuple = seq_length __a : Optional[Any] = is_training __a : List[Any] = use_attention_mask __a : Tuple = use_token_type_ids __a : int = use_labels __a : Union[str, Any] = vocab_size __a : List[Any] = hidden_size __a : int = num_hidden_layers __a : Dict = num_attention_heads __a : List[str] = intermediate_size __a : List[str] = hidden_act __a : List[str] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Tuple = max_position_embeddings __a : Tuple = type_vocab_size __a : Dict = type_sequence_label_size __a : Union[str, Any] = initializer_range __a : Optional[Any] = num_choices def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : Tuple = None if self.use_attention_mask: __a : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __a : List[str] = None if self.use_token_type_ids: __a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : int = RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = self.prepare_config_and_inputs() __a , __a , __a , __a : Optional[int] = config_and_inputs __a : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def __lowerCamelCase ( self ): '''simple docstring''' __a : str = self.prepare_config_and_inputs() __a , __a , __a , __a : Dict = config_and_inputs __a : Optional[int] = True __a : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class SCREAMING_SNAKE_CASE__ ( 
_a , unittest.TestCase ): lowercase__ = True lowercase__ = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = FlaxRobertaModelTester(self ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: __a : List[Any] = model_class_name.from_pretrained("""roberta-base""" , from_pt=snake_case_ ) __a : int = model(np.ones((1, 1) ) ) self.assertIsNotNone(snake_case_ )
720
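A minimal load-and-forward sketch matching the slow test above; from_pt=True assumes PyTorch is installed so the checkpoint can be converted on the fly:

import numpy as np
from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 1, 768) for roberta-base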
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ): '''simple docstring''' __a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} __a : Dict = parent __a : Union[str, Any] = batch_size __a : Optional[int] = num_channels __a : Dict = min_resolution __a : List[Any] = max_resolution __a : int = do_resize __a : str = size __a : Optional[Any] = do_rescale __a : Optional[Any] = rescale_factor __a : str = do_normalize __a : Any = image_mean __a : Optional[Any] = image_std __a : Dict = do_pad def __lowerCamelCase ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ): '''simple docstring''' if not batched: __a : Union[str, Any] = image_inputs[0] if isinstance(__UpperCamelCase , Image.Image ): __a , __a : Tuple = image.size else: __a , __a : Tuple = image.shape[1], image.shape[2] if w < h: __a : Optional[int] = int(self.size["""shortest_edge"""] * h / w ) __a : Tuple = self.size["""shortest_edge"""] elif w > h: __a : Optional[Any] = self.size["""shortest_edge"""] __a : Any = int(self.size["""shortest_edge"""] * w / h ) else: __a : Any = self.size["""shortest_edge"""] __a : Optional[int] = self.size["""shortest_edge"""] else: __a : Any = [] for image in image_inputs: __a , __a : Any = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0] __a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ): lowercase__ = DetrImageProcessor if is_vision_available() else None def __lowerCamelCase ( self ): '''simple docstring''' __a : str = DetrImageProcessingTester(self ) @property def __lowerCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) ) self.assertTrue(hasattr(__UpperCamelCase , 
"""do_resize""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """size""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , __UpperCamelCase ) __a : List[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , Image.Image ) # Test not batched input __a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) __a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , np.ndarray ) # Test not batched input __a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values __a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , torch.Tensor ) # Test not batched input __a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( 
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: __a : Dict = json.loads(f.read() ) __a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target} # encode them __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" ) __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __a : Union[str, Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase ) __a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify area __a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) ) # verify boxes __a : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase ) __a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) ) # verify image_id __a : Union[str, Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) ) # verify is_crowd __a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) ) # verify class_labels __a : Any = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) ) # verify orig_size __a : Any = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) ) # verify size __a : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: __a : Tuple = json.loads(f.read() ) __a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} __a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" ) __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __a : List[str] = torch.Size([1, 3, 800, 1066] 
) self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase ) __a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify area __a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) ) # verify boxes __a : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase ) __a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) ) # verify image_id __a : List[str] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) ) # verify is_crowd __a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) ) # verify class_labels __a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) ) # verify masks __a : Union[str, Any] = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase ) # verify orig_size __a : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) ) # verify size __a : List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
697
0
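Beyond preprocessing, the same image processor also post-processes detection outputs; a sketch of end-to-end usage with the matching DETR checkpoint (the fixture path is reused from the tests above):

import torch
from PIL import Image
from transformers import DetrForObjectDetection, DetrImageProcessor

processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# rescale boxes back to the original image size and keep confident detections
results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=torch.tensor([image.size[::-1]])
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())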
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Tuple = { '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''', # See all Nat models at https://huggingface.co/models?filter=nat } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase__ = "nat" lowercase__ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , __UpperCamelCase=4 , __UpperCamelCase=3 , __UpperCamelCase=64 , __UpperCamelCase=[3, 4, 6, 5] , __UpperCamelCase=[2, 4, 8, 16] , __UpperCamelCase=7 , __UpperCamelCase=3.0 , __UpperCamelCase=True , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase="gelu" , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-5 , __UpperCamelCase=0.0 , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ): '''simple docstring''' super().__init__(**_lowercase ) __a : Any = patch_size __a : Optional[Any] = num_channels __a : Optional[Any] = embed_dim __a : Tuple = depths __a : int = len(_lowercase ) __a : Optional[int] = num_heads __a : List[str] = kernel_size __a : str = mlp_ratio __a : str = qkv_bias __a : str = hidden_dropout_prob __a : Tuple = attention_probs_dropout_prob __a : Tuple = drop_path_rate __a : Dict = hidden_act __a : Union[str, Any] = layer_norm_eps __a : Tuple = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __a : Union[str, Any] = int(embed_dim * 2 ** (len(_lowercase ) - 1) ) __a : Union[str, Any] = layer_scale_init_value __a : Optional[Any] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(_lowercase ) + 1 )] __a : Any = get_aligned_output_features_output_indices( out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
721
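Note that hidden_size is derived rather than passed in: the config sets it to embed_dim * 2 ** (num_stages - 1), i.e. the channel dimension after the last stage. With the defaults above:

embed_dim, depths = 64, [3, 4, 6, 5]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 512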
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING) __SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() if args.tokenizer_name: __SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size __SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine' if args.fpaa: __SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine' if args.inta: __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)] __SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __SCREAMING_SNAKE_CASE : Tuple = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: __a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) __a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) __a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase ) # start time __a : Optional[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowercase ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase ) cuda.memcpy_dtoh_async(lowercase , 
lowercase , lowercase ) # Synchronize the stream and take time stream.synchronize() # end time __a : str = time.time() __a : Any = end_time - start_time __a : Optional[int] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names __SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0] __SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1] __SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length) def _snake_case ( lowercase ) -> Tuple: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace __a : Optional[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. 
This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. __a : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. __a : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. __a : Optional[Any] = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). __a : Dict = tokenized_examples.sequence_ids(lowercase ) __a : Optional[Any] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. __a : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. __a : int = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'] # Validation Feature Creation __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __SCREAMING_SNAKE_CASE : List[Any] = default_data_collator __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __SCREAMING_SNAKE_CASE : List[str] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def _snake_case ( lowercase , lowercase , lowercase , lowercase="eval" ) -> Any: # Post-processing: we match the start logits and end logits to answers in the original context. __a : List[str] = postprocess_qa_predictions( examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: __a : List[str] = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: __a : List[str] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] __a : Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowercase , label_ids=lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def _snake_case ( lowercase ) -> Optional[int]: return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize # Allocate device memory for inputs and outputs. __SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __SCREAMING_SNAKE_CASE : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : str = cuda.mem_alloc(h_outputa.nbytes) __SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __SCREAMING_SNAKE_CASE : Tuple = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') __SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0 __SCREAMING_SNAKE_CASE : str = 0 __SCREAMING_SNAKE_CASE : str = timeit.default_timer() __SCREAMING_SNAKE_CASE : Dict = None for step, batch in enumerate(eval_dataloader): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = outputs __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(start_logits) __SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __SCREAMING_SNAKE_CASE : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __SCREAMING_SNAKE_CASE : Tuple = nested_truncate(all_preds, len(eval_dataset)) __SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} 
ms'.format(total_time * 1_000)) logger.info('Total Number of Inferences = %d', niter) __SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds) __SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
697
0
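The pad_across_processes / nested_concat step in the evaluation loop above exists because per-step logits can disagree in sequence length; a toy numpy sketch of the same pad-then-concatenate idea, with hypothetical shapes:

import numpy as np

def pad_to(arr: np.ndarray, length: int, pad_index: int = -100) -> np.ndarray:
    # pad the sequence dimension with the sentinel value, mirroring pad_index=-100 above
    out = np.full((arr.shape[0], length), pad_index, dtype=arr.dtype)
    out[:, : arr.shape[1]] = arr
    return out

step_a = np.random.randn(8, 384).astype(np.float32)  # start logits from one eval step
step_b = np.random.randn(8, 320).astype(np.float32)  # a step with a shorter padded length
max_len = max(step_a.shape[1], step_b.shape[1])
all_preds = np.concatenate([pad_to(step_a, max_len), pad_to(step_b, max_len)], axis=0)
print(all_preds.shape)  # (16, 384)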
'''Convert a string between simple, pascal, camel, snake and kebab case.'''

import re


def split_input(str_: str) -> list:
    # split on any non-alphanumeric run, then split each chunk into words
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__('doctest').testmod()
700
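A few worked calls against the helpers above:

print(to_pascal_case("one two three"))              # OneTwoThree
print(to_camel_case("one two three"))               # oneTwoThree
print(to_snake_case("One Two Three", upper=False))  # one_two_three
print(to_kebab_case("One Two Three", upper=True))   # ONE-TWO-THREE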
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = 42 lowercase__ = 42 def __init__( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' super().__init__() self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase ) @torch.no_grad() def __call__( self , __UpperCamelCase = 1 , __UpperCamelCase = 50 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , **__UpperCamelCase , ): '''simple docstring''' __a : int = self.unet.config.sample_size __a : Optional[int] = (batch_size, 3, img_size, img_size) __a : Union[str, Any] = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) __a : Dict = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(__UpperCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper __a : Dict = self.scheduler.schedule[t] __a : Any = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat __a , __a : Tuple = self.scheduler.add_noise_to_input(__UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. __a : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev __a : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. __a : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample __a : Tuple = self.scheduler.step_correct( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , step_output.prev_sample , step_output["""derivative"""] , ) __a : Tuple = step_output.prev_sample __a : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 ) __a : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __a : List[Any] = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCamelCase )
697
0
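In the diffusers versions that ship this pipeline it is exported as KarrasVePipeline; a tiny smoke-test sketch with a randomly initialised (untrained, so output is noise) UNet:

import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)  # tiny random UNet
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(batch_size=1, num_inference_steps=2, generator=torch.manual_seed(0)).images[0]
image.save("karras_ve_sample.png")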
'''Odd-even transposition (brick) sort.'''


def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # until a full pass makes no swaps, keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterate over all even indices
            if input_list[i] > input_list[i + 1]:
                # swap if the pair is out of order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterate over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swap if the pair is out of order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # elements of the list on one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
701
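A quick check of the sort above (like bubble sort it runs in O(n^2), but its alternating even/odd phases parallelise well):

print(odd_even_sort([5, 3, 1, 8, 0]))  # [0, 1, 3, 5, 8]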
'''Project Euler 112: find the least number at which the proportion of bouncy numbers reaches a given percentage.'''


def check_bouncy(n: int) -> bool:
    '''A number is bouncy when its digits are neither entirely increasing nor entirely decreasing.'''
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
697
0
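Worked values for the helpers above; 538 is the well-known Project Euler 112 answer for the 50% threshold:

print(check_bouncy(538))     # True: digits neither increase nor decrease
print(check_bouncy(1234))    # False: monotonically increasing
print(solution(percent=50))  # 538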
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Optional[int] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } __SCREAMING_SNAKE_CASE : Any = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Any: for attribute in key.split(""".""" ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models __a : List[Any] = """lm_head""" __a : List[Any] = getattr(lowercase , lowercase ) if weight_type is not None: __a : Dict = getattr(lowercase , lowercase ).shape else: __a : str = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a : Tuple = value elif weight_type == "weight_g": __a : Union[str, Any] = value elif weight_type == "weight_v": __a : Tuple = value elif weight_type == "bias": __a : List[Any] = value else: __a : Dict = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]: __a : List[str] = [] __a : Optional[int] = fairseq_model.state_dict() __a : List[Any] = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): __a : Optional[int] = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , ) __a : str = True else: for key, mapped_key in MAPPING.items(): __a : str = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __a : int = True if "*" in mapped_key: __a : Dict = name.split(lowercase )[0].split(""".""" )[-2] __a : Any = mapped_key.replace("""*""" , lowercase ) if "weight_g" in name: __a : Optional[int] = """weight_g""" elif "weight_v" in name: __a : List[str] = """weight_v""" elif "bias" in name: __a : Optional[int] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __a : Union[str, Any] = """weight""" else: __a : int = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]: __a : Dict = full_name.split("""conv_layers.""" )[-1] __a : Any = name.split(""".""" ) __a : Any = int(items[0] ) __a : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a : Any = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a : List[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __a : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __a : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase ) @torch.no_grad() def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Union[str, Any]: if config_path is not None: __a : Optional[Any] = UniSpeechConfig.from_pretrained(lowercase ) else: __a : str = UniSpeechConfig() if is_finetuned: if dict_path: __a : List[str] = Dictionary.load_from_json(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __a : Any = target_dict.pad_index __a : int = target_dict.bos_index __a : Union[str, Any] = target_dict.eos_index __a : int = len(target_dict.symbols ) __a : Optional[int] = os.path.join(lowercase , """vocab.json""" ) if not os.path.isdir(lowercase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) __a : Union[str, Any] = target_dict.indices # fairseq has the <pad> and <s> switched __a : str = 4_2 __a : Union[str, Any] = 4_3 with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowercase , lowercase ) __a : Optional[int] = WavaVecaPhonemeCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , ) __a : Optional[int] = True if config.feat_extract_norm == """layer""" else False __a : int = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , ) __a : Optional[int] = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) __a : Optional[Any] = UniSpeechForCTC(lowercase ) else: __a : List[str] = UniSpeechForPreTraining(lowercase ) if is_finetuned: __a , __a , __a : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} ) else: __a , __a , __a : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __a : List[str] = model[0].eval() recursively_load_weights(lowercase , lowercase , lowercase ) hf_unispeech.save_pretrained(lowercase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args() 
convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
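# Hedged usage sketch for the conversion script above. The script file name,
# checkpoint, dict and output paths are placeholders, not files that ship with
# the repository:
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_fairseq.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-converted
#
# Once converted, the dump folder can be reloaded with the standard API:
import os
from transformers import UniSpeechForCTC

if os.path.isdir("./unispeech-converted"):  # guard so the sketch is safe to run as-is
    model = UniSpeechForCTC.from_pretrained("./unispeech-converted")
    print(model.config.vocab_size)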
702
'''simple docstring''' import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def _snake_case ( lowercase , lowercase , lowercase ) -> Any: # Construct model if gpta_config_file == "": __a : Dict = GPTaConfig() else: __a : Optional[Any] = GPTaConfig.from_json_file(lowercase ) __a : Union[str, Any] = GPTaModel(lowercase ) # Load weights from numpy load_tf_weights_in_gpta(lowercase , lowercase , lowercase ) # Save pytorch-model __a : Optional[int] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME __a : Dict = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() , lowercase ) print(F"""Save configuration file to {pytorch_config_dump_path}""" ) with open(lowercase , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--gpt2_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained OpenAI model. \n' 'This specifies the model architecture.' ), ) __SCREAMING_SNAKE_CASE : Dict = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
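# Minimal, hedged sketch of the save step performed by the GPT-2 conversion
# script above, using a tiny untrained model so it runs without a TensorFlow
# checkpoint. The config values are arbitrary illustration, not the real GPT-2
# hyperparameters.
import os
import tempfile

import torch
from transformers import GPT2Config, GPT2Model
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME

config = GPT2Config(vocab_size=100, n_embd=64, n_layer=2, n_head=2)
model = GPT2Model(config)
with tempfile.TemporaryDirectory() as dump_dir:
    # mirrors the script: weights and config are written side by side
    torch.save(model.state_dict(), os.path.join(dump_dir, WEIGHTS_NAME))
    with open(os.path.join(dump_dir, CONFIG_NAME), "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
    print(sorted(os.listdir(dump_dir)))  # ['config.json', 'pytorch_model.bin']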
697
0
'''simple docstring''' import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED __SCREAMING_SNAKE_CASE : Tuple = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __SCREAMING_SNAKE_CASE : Dict = { """allenai/led-base-16384""": 16_384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def _snake_case ( ) -> Optional[Any]: '''simple docstring''' __a : Optional[int] = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) __a : Optional[int] = bs[:] __a : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case_ ) cs.append(2**8 + n ) n += 1 __a : Dict = [chr(snake_case_ ) for n in cs] return dict(zip(snake_case_ , snake_case_ ) ) def _snake_case ( lowercase ) -> Dict: '''simple docstring''' __a : Dict = set() __a : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __a : List[Any] = char return pairs class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = ["input_ids", "attention_mask"] def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="replace" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase=False , **__UpperCamelCase , ): '''simple docstring''' __a : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token __a : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token __a : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token __a : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token __a : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token __a : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __a : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , ) with open(__A , encoding="""utf-8""" ) as vocab_handle: __a : Any = json.load(__A ) __a : Optional[Any] = {v: k for k, v in self.encoder.items()} __a : Union[str, Any] = errors # how to handle errors in decoding __a : Any = bytes_to_unicode() __a : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__A , encoding="""utf-8""" ) as merges_handle: __a : str = merges_handle.read().split("""\n""" )[1:-1] __a : int = [tuple(merge.split() ) for merge in bpe_merges] __a : str = dict(zip(__A , range(len(__A ) ) ) ) __a : Optional[int] = {} __a : Any = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __a : Union[str, Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def __lowerCamelCase ( self ): '''simple docstring''' return len(self.encoder ) def __lowerCamelCase ( self ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' if token in self.cache: return self.cache[token] __a : Union[str, Any] = tuple(__A ) __a : List[Any] = get_pairs(__A ) if not pairs: return token while True: __a : Tuple = min(__A , key=lambda __UpperCamelCase : self.bpe_ranks.get(__A , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break __a : Dict = bigram __a : str = [] __a : Union[str, Any] = 0 while i < len(__A ): try: __a : Dict = word.index(__A , __A ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __a : str = j if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __a : str = tuple(__A ) __a : int = new_word if len(__A ) == 1: break else: __a : List[str] = get_pairs(__A ) __a : List[Any] = " ".join(__A ) __a : Optional[int] = word return word def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' __a : List[str] = [] for token in re.findall(self.pat , __A ): __a : Dict = "".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(""" """ ) ) return bpe_tokens def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' return self.encoder.get(__A , self.encoder.get(self.unk_token ) ) def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' return self.decoder.get(__A ) def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = "".join(__A ) __a : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' if not os.path.isdir(__A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __a : List[Any] = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __a : 
str = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(__A , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + """\n""" ) __a : str = 0 with open(__A , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) __a : int = token_index writer.write(""" """.join(__A ) + """\n""" ) index += 1 return vocab_file, merge_file def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a : Tuple = [self.cls_token_id] __a : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1] def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' __a : Any = [self.sep_token_id] __a : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ): '''simple docstring''' __a : Optional[int] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()): __a : Optional[int] = " " + text return (text, kwargs) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = PaddingStrategy.DO_NOT_PAD , __UpperCamelCase = None , __UpperCamelCase = None , ): '''simple docstring''' __a : Optional[Any] = super()._pad( encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , ) # Load from model defaults if return_attention_mask is None: __a : Union[str, Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __a : Union[str, Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. __a : Tuple = len(encoded_inputs["""global_attention_mask"""] ) != len(__A ) if needs_to_be_padded: __a : int = len(__A ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __a : int = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": __a : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
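# Hedged sketch of the byte-to-unicode trick the tokenizer above relies on. In
# the listing above the helper's variable names were garbled by the renaming
# pass (`bs`, `cs` and `n` are used but never bound); this restores the
# intended logic: map every byte value 0-255 to a printable unicode character
# so byte-level BPE never needs an <unk> symbol.
def bytes_to_unicode_sketch():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)  # shift non-printable bytes into a clean unicode range
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

byte_encoder = bytes_to_unicode_sketch()
assert len(byte_encoder) == 256  # every byte value has a stand-in character
print("".join(byte_encoder[b] for b in "a token".encode("utf-8")))  # 'aĠtoken' (space -> 'Ġ')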
703
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE__ : @staticmethod def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 ) self.assertGreater(len(__UpperCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( __UpperCamelCase , { """score""": ANY(__UpperCamelCase ), """label""": ANY(__UpperCamelCase ), """box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )}, } , ) import datasets __a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) __a : Tuple = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] __a : Any = object_detector(__UpperCamelCase , threshold=0.0 ) self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) ) for outputs in batch_outputs: self.assertGreater(len(__UpperCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( __UpperCamelCase , { """score""": ANY(__UpperCamelCase ), """label""": ANY(__UpperCamelCase ), """box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @require_torch def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3""" __a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase ) __a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase ) __a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase ) __a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ] , ) __a : Union[str, Any] = object_detector( [ 
"""http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], ] , ) @require_torch @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : str = """facebook/detr-resnet-50""" __a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase ) __a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase ) __a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase ) __a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) __a : Optional[Any] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def __lowerCamelCase 
( self ): '''simple docstring''' __a : int = """facebook/detr-resnet-50""" __a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase ) __a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) __a : List[str] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = 0.9_9_8_5 __a : Union[str, Any] = """facebook/detr-resnet-50""" __a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase ) __a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) @require_torch @require_pytesseract @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : str = """Narsil/layoutlmv3-finetuned-funsd""" __a : List[Any] = 0.9_9_9_3 __a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase ) __a : List[str] = 
object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, {"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, ] , )
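# Hedged end-to-end sketch of the pipeline the tests above exercise. It needs
# network access to download facebook/detr-resnet-50 and the COCO image, so
# treat it as illustrative rather than part of the test suite.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9
)
for pred in predictions:  # e.g. cat / remote / couch with scores and pixel boxes
    print(pred["label"], round(pred["score"], 4), pred["box"])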
697
0
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=2 , __UpperCamelCase=99 , __UpperCamelCase=0 , __UpperCamelCase=32 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase="last" , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=0 , ): '''simple docstring''' __a : List[str] = parent __a : Any = batch_size __a : List[Any] = seq_length __a : Optional[int] = is_training __a : Optional[int] = use_input_lengths __a : Optional[int] = use_token_type_ids __a : Any = use_labels __a : Optional[int] = gelu_activation __a : Dict = sinusoidal_embeddings __a : Union[str, Any] = causal __a : List[str] = asm __a : Dict = n_langs __a : List[Any] = vocab_size __a : List[Any] = n_special __a : List[str] = hidden_size __a : Tuple = num_hidden_layers __a : Optional[Any] = num_attention_heads __a : List[str] = hidden_dropout_prob __a : str = attention_probs_dropout_prob __a : int = max_position_embeddings __a : Optional[Any] = type_sequence_label_size __a : List[Any] = initializer_range __a : Dict = num_labels __a : str = num_choices __a : Union[str, Any] = summary_type __a : int = use_proj __a : Optional[Any] = scope __a : int = bos_token_id def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __a : Union[str, Any] = None if self.use_input_lengths: __a : List[Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __a : List[Any] = None if self.use_token_type_ids: __a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __a : List[str] = None __a : Dict = None __a : Any = None if self.use_labels: __a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : Any = ids_tensor([self.batch_size] , 2 ).float() __a : int = ids_tensor([self.batch_size] , self.num_choices ) __a : str = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __lowerCamelCase ( self ): '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , 
n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' __a : Any = XLMModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : Union[str, Any] = model(__UpperCamelCase , lengths=__UpperCamelCase , langs=__UpperCamelCase ) __a : Union[str, Any] = model(__UpperCamelCase , langs=__UpperCamelCase ) __a : Optional[int] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' __a : Optional[Any] = XLMWithLMHeadModel(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : str = model(__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' __a : Optional[int] = XLMForQuestionAnsweringSimple(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : Any = model(__UpperCamelCase ) __a : Dict = model(__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase ) __a : int = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' __a : List[Any] = XLMForQuestionAnswering(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : str = model(__UpperCamelCase ) __a : Optional[int] = model( __UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , cls_index=__UpperCamelCase , is_impossible=__UpperCamelCase , p_mask=__UpperCamelCase , ) __a : Tuple = model( __UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , cls_index=__UpperCamelCase , is_impossible=__UpperCamelCase , ) (__a ) : int = result_with_labels.to_tuple() __a : Tuple = model(__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase ) (__a ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) 
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' __a : Dict = XLMForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : Union[str, Any] = model(__UpperCamelCase ) __a : Tuple = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' __a : int = self.num_labels __a : List[Any] = XLMForTokenClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : Optional[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' __a : int = self.num_choices __a : Dict = XLMForMultipleChoice(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Any = model( __UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.prepare_config_and_inputs() ( __a ) : str = config_and_inputs __a : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): lowercase__ = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) lowercase__ = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowercase__ = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def __lowerCamelCase ( self , 
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ): '''simple docstring''' __a : List[Any] = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __a : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase ) __a : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase ) return inputs_dict def __lowerCamelCase ( self ): '''simple docstring''' __a : str = XLMModelTester(self ) __a : int = ConfigTester(self , config_class=__UpperCamelCase , emb_dim=37 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=1 ): '''simple docstring''' self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) self.assertListEqual( [isinstance(__UpperCamelCase , __UpperCamelCase ) for iter_attentions in attentions] , [True] * len(__UpperCamelCase ) ) self.assertEqual(len(__UpperCamelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(__UpperCamelCase ): # adds PAD dummy token __a : Tuple = min_length + idx + 1 __a : Optional[Any] = min_length + idx + 1 __a : Union[str, Any] = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__UpperCamelCase ) ) def __lowerCamelCase ( self , __UpperCamelCase 
, __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=1 ): '''simple docstring''' self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) self.assertListEqual( [isinstance(__UpperCamelCase , __UpperCamelCase ) for iter_hidden_states in hidden_states] , [True] * len(__UpperCamelCase ) , ) self.assertEqual(len(__UpperCamelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(__UpperCamelCase ): # adds PAD dummy token __a : str = min_length + idx + 1 __a : List[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__UpperCamelCase ) , ) pass @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Union[str, Any] = XLMModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" ) model.to(__UpperCamelCase ) __a : Dict = torch.tensor([[14, 447]] , dtype=torch.long , device=__UpperCamelCase ) # the president __a : int = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __a : List[Any] = model.generate(__UpperCamelCase , do_sample=__UpperCamelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __UpperCamelCase )
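# Hedged sketch of the core shape check the model tester above performs, run
# on a randomly initialized tiny XLM (config values are scaled-down versions
# of the tester's defaults; nothing is downloaded).
import torch
from transformers import XLMConfig, XLMModel

config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4)
model = XLMModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (2, 7))  # (batch_size, seq_length)
with torch.no_grad():
    outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # torch.Size([2, 7, 32])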
704
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : List[str] = { 'configuration_blenderbot_small': [ 'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotSmallConfig', 'BlenderbotSmallOnnxConfig', ], 'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : List[str] = [ 'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[int] = [ 'TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[Any] = [ 'FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
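# Hedged sketch of the `_LazyModule` idea the init file above relies on: the
# package advertises every public name up front, but the heavy submodule is
# imported only when an attribute is actually touched. The class below is an
# illustration of the pattern, not the real implementation in transformers.utils.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        try:
            module_name = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)  # deferred import
        return getattr(module, attr)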
697
0
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class SCREAMING_SNAKE_CASE__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase__ = TextToVideoSDPipeline lowercase__ = TEXT_TO_IMAGE_PARAMS lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. lowercase__ = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def __lowerCamelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , ) __a : Tuple = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , ) torch.manual_seed(0 ) __a : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __a : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) __a : List[Any] = CLIPTextModel(_a ) __a : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __a : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=0 ): '''simple docstring''' if str(_a ).startswith("""mps""" ): __a : Dict = torch.manual_seed(_a ) else: __a : str = torch.Generator(device=_a ).manual_seed(_a ) __a : Dict = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """pt""", } return inputs def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator __a : str = self.get_dummy_components() __a : Optional[Any] = TextToVideoSDPipeline(**_a ) __a : str = sd_pipe.to(_a ) sd_pipe.set_progress_bar_config(disable=_a ) __a : Optional[Any] = self.get_dummy_inputs(_a ) __a : int = """np""" __a : List[str] = sd_pipe(**_a ).frames __a : int = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) __a : List[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def 
__lowerCamelCase ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_a , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __lowerCamelCase ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_a , expected_max_diff=1E-2 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" ) __a : Optional[Any] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) __a : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __a : List[Any] = pipe.to("""cuda""" ) __a : Optional[Any] = """Spiderman is surfing""" __a : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __a : List[str] = pipe(_a , generator=_a , num_inference_steps=25 , output_type="""pt""" ).frames __a : str = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def __lowerCamelCase ( self ): '''simple docstring''' __a : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" ) __a : Optional[Any] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" ) __a : Any = pipe.to("""cuda""" ) __a : Any = """Spiderman is surfing""" __a : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __a : List[str] = pipe(_a , generator=_a , num_inference_steps=2 , output_type="""pt""" ).frames __a : List[Any] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2
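# Hedged sketch of running the text-to-video pipeline outside the test
# harness. It downloads several GB of weights and expects a CUDA device, so it
# is illustrative only; the exact type of `.frames` varies across diffusers
# versions.
import torch
from diffusers import DPMSolverMultistepScheduler, TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained(
    "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
video_frames = pipe("Spiderman is surfing", num_inference_steps=25).frames
print(len(video_frames))  # one frame per generated video step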
705
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Any = params __a : Optional[Any] = np.array(__UpperCamelCase ) __a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __UpperCamelCase ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self ): '''simple docstring''' return len(self.lengths ) def __lowerCamelCase ( self ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = self.params.max_model_input_size __a : Union[str, Any] = self.lengths > max_len logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" ) def divide_chunks(__UpperCamelCase , __UpperCamelCase ): return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )] __a : int = [] __a : Union[str, Any] = [] if self.params.mlm: __a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: __a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __a : Any = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase ) if sub_s[-1] != sep_id: __a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase ) assert len(__UpperCamelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__UpperCamelCase ) new_tok_ids.extend(__UpperCamelCase ) new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] ) __a : Dict = np.array(__UpperCamelCase ) __a : Tuple = np.array(__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = len(self ) __a : List[str] = self.lengths > 11 __a : int = self.token_ids[indices] __a : Union[str, Any] = self.lengths[indices] __a : Any = len(self ) logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def __lowerCamelCase ( self ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: __a : List[str] = self.params.special_tok_ids["""unk_token"""] __a : str = len(self ) __a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __a : Optional[Any] = (unk_occs / self.lengths) < 0.5 __a : List[str] = self.token_ids[indices] __a : Optional[int] = self.lengths[indices] __a : Any = len(self ) logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def __lowerCamelCase ( self ): '''simple docstring''' if not self.params.is_master: return logger.info(f"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = 
self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' __a : List[str] = [t[0] for t in batch] __a : str = [t[1] for t in batch] assert len(__UpperCamelCase ) == len(__UpperCamelCase ) # Max for paddings __a : Optional[int] = max(__UpperCamelCase ) # Pad token ids if self.params.mlm: __a : int = self.params.special_tok_ids["""pad_token"""] else: __a : Tuple = self.params.special_tok_ids["""unk_token"""] __a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids] assert len(tk_ ) == len(__UpperCamelCase ) assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ ) __a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_) __a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs) return tk_t, lg_t
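# Hedged, standalone re-statement of the padding logic in `batch_sequences`
# above: every sequence is right-padded to the batch maximum and the true
# lengths are returned alongside the padded ids.
import numpy as np
import torch

def pad_batch(token_ids, pad_idx):
    lengths = [len(t) for t in token_ids]
    max_seq_len = max(lengths)
    padded = [list(t) + [pad_idx] * (max_seq_len - len(t)) for t in token_ids]
    return torch.tensor(padded), torch.tensor(lengths)

tk_t, lg_t = pad_batch([np.array([5, 6, 7, 8]), np.array([5, 6])], pad_idx=0)
print(tk_t)  # tensor([[5, 6, 7, 8], [5, 6, 0, 0]])
print(lg_t)  # tensor([4, 2])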
697
0
'''simple docstring''' from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) def _snake_case ( lowercase ) -> Any: if isinstance(snake_case__ , np.ndarray ): return list(tensor.shape ) __a : List[Any] = tf.shape(snake_case__ ) if tensor.shape == tf.TensorShape(snake_case__ ): return dynamic __a : int = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(snake_case__ )] def _snake_case ( lowercase , lowercase = None , lowercase = None ) -> int: return tf.nn.softmax(logits=logits + 1E-9 , axis=snake_case__ , name=snake_case__ ) def _snake_case ( lowercase , lowercase , lowercase , lowercase=1E-5 , lowercase=-1 ) -> Tuple: if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(snake_case__ , snake_case__ ): raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" ) # Get mean and variance on the axis to be normalized __a : int = tf.nn.moments(snake_case__ , axes=[axis] , keepdims=snake_case__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis __a : Optional[Any] = [1] * inputs.shape.rank __a : Optional[Any] = shape_list(snake_case__ )[axis] __a : Optional[int] = tf.reshape(snake_case__ , snake_case__ ) __a : List[str] = tf.reshape(snake_case__ , snake_case__ ) # Compute layer normalization using the batch_normalization # function. __a : Optional[int] = tf.nn.batch_normalization( snake_case__ , snake_case__ , snake_case__ , offset=snake_case__ , scale=snake_case__ , variance_epsilon=snake_case__ , ) return outputs def _snake_case ( lowercase , lowercase=0 , lowercase=-1 ) -> Tuple: if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input __a : Any = tf.shape(snake_case__ ) __a : Any = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) __a : int = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(snake_case__ , snake_case__ ) def _snake_case ( lowercase ) -> str: if not isinstance(snake_case__ , tf.Tensor ): __a : Optional[int] = tf.convert_to_tensor(snake_case__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: __a : int = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: __a : List[str] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) __a : Optional[int] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def _snake_case ( lowercase , lowercase , lowercase = "input_ids" ) -> Optional[Any]: tf.debugging.assert_less( snake_case__ , tf.cast(snake_case__ , dtype=tensor.dtype ) , message=( F"""The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case__ )}) must be smaller than the embedding """ F"""layer's input dimension ({embed_dim}). 
The likely cause is some problem at tokenization time.""" ) , ) def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]: __a : Union[str, Any] = 6_4_5_1_2 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. __a : List[str] = [x for x in data if len(snake_case__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( """The following attributes cannot be saved to HDF5 file because """ F"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ F"""bytes: {bad_attributes}""" ) __a : Optional[int] = np.asarray(snake_case__ ) __a : Dict = 1 __a : Dict = np.array_split(snake_case__ , snake_case__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 __a : int = np.array_split(snake_case__ , snake_case__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(snake_case__ ): __a : Any = chunk_data else: __a : List[Any] = data def _snake_case ( lowercase , lowercase ) -> int: if name in group.attrs: __a : Optional[Any] = [n.decode("""utf8""" ) if hasattr(snake_case__ , """decode""" ) else n for n in group.attrs[name]] else: __a : List[str] = [] __a : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode("""utf8""" ) if hasattr(snake_case__ , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] ) chunk_id += 1 return data def _snake_case ( lowercase ) -> int: def _expand_single_ad_tensor(lowercase ): if isinstance(snake_case__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(snake_case__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , snake_case__ )
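# Hedged demo of the static/dynamic shape mixing done by `shape_list` above:
# inside a traced tf.function, unknown dimensions come back as scalar tensors
# while known ones stay plain Python ints, so the result can be fed straight
# into ops like tf.reshape.
import numpy as np
import tensorflow as tf

def shape_list_sketch(tensor):
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]

@tf.function(input_signature=[tf.TensorSpec([None, 4], tf.float32)])
def reshape_pairs(x):
    dims = shape_list_sketch(x)  # dims[0] is a dynamic scalar, dims[1] is the int 4
    return tf.reshape(x, [dims[0], 2, 2])

print(reshape_pairs(tf.zeros((3, 4))).shape)  # (3, 2, 2)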
706
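A minimal sketch of what the shape-listing helper in the row above is for (the names here are illustrative, not the row's own): inside a tf.function, some dimensions are only known at run time, so the helper returns Python ints for statically known dims and scalar tensors for dynamic ones.

import tensorflow as tf

def shape_list_demo(tensor):
    # Python ints where the dim is statically known, scalar tensors otherwise.
    static = tensor.shape.as_list()
    dynamic = tf.shape(tensor)
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]

@tf.function(input_signature=[tf.TensorSpec([None, None, 8])])
def show(t):
    dims = shape_list_demo(t)  # [<batch tensor>, <seq tensor>, 8]
    tf.print(dims[0], dims[1], dims[2])
    return t

show(tf.zeros((2, 5, 8)))  # prints: 2 5 8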
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = "" lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' super().__init__(self , **__UpperCamelCase ) __a : int = repo_info __a : int = token __a : Any = None def __lowerCamelCase ( self ): '''simple docstring''' if self.dir_cache is None: __a : Union[str, Any] = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __a : List[str] = { """name""": hf_file.rfilename, """size""": None, """type""": """file""", } self.dir_cache.update( { str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ): '''simple docstring''' if not isinstance(self.repo_info , __UpperCamelCase ): raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" ) __a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha ) return fsspec.open( __UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open() def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' self._get_dirs() __a : str = self._strip_protocol(__UpperCamelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ): '''simple docstring''' self._get_dirs() __a : int = PurePosixPath(path.strip("""/""" ) ) __a : List[str] = {} for p, f in self.dir_cache.items(): __a : str = PurePosixPath(p.strip("""/""" ) ) __a : Optional[int] = p.parent if root == path: __a : List[str] = f __a : str = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
697
0
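An illustration of how the _get_dirs cache in the filesystem class above derives directory entries from a flat list of sibling filenames (the filenames below are made up for the example):

from pathlib import PurePosixPath

siblings = ["data/train.csv", "data/test.csv", "README.md"]
dir_cache = {}
for rfilename in siblings:
    dir_cache[rfilename] = {"name": rfilename, "size": None, "type": "file"}
    for d in list(PurePosixPath(rfilename).parents)[:-1]:  # drop the '.' root
        dir_cache[str(d)] = {"name": str(d), "size": None, "type": "directory"}
print(sorted(dir_cache))
# ['README.md', 'data', 'data/test.csv', 'data/train.csv']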
'''simple docstring''' import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __SCREAMING_SNAKE_CASE : int = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt') def _snake_case ( lowercase , lowercase , lowercase = 1_6_0_0_0 ) -> Optional[int]: __a : int = int(round(sample_rate * max_length ) ) if len(__snake_case ) <= sample_length: return wav __a : Any = randint(0 , len(__snake_case ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class SCREAMING_SNAKE_CASE__ : lowercase__ = field(default=__UpperCamelCase , metadata={"help": "Name of a dataset from the datasets package"} ) lowercase__ = field( default=__UpperCamelCase , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) lowercase__ = field( default=__UpperCamelCase , metadata={"help": "A file containing the training audio paths and labels."} ) lowercase__ = field( default=__UpperCamelCase , metadata={"help": "A file containing the validation audio paths and labels."} ) lowercase__ = field( default="train" , metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" } , ) lowercase__ = field( default="validation" , metadata={ "help": ( "The name of the training data set split to use (via the datasets library). Defaults to 'validation'" ) } , ) lowercase__ = field( default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , ) lowercase__ = field( default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} ) lowercase__ = field( default=__UpperCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) lowercase__ = field( default=__UpperCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) } , ) lowercase__ = field( default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , ) @dataclass class SCREAMING_SNAKE_CASE__ : lowercase__ = field( default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , ) lowercase__ = field( default=__UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) lowercase__ = field( default=__UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} ) lowercase__ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) lowercase__ = field( default=__UpperCamelCase , metadata={"help": "Name or path of preprocessor config."} ) lowercase__ = field( default=__UpperCamelCase , metadata={"help": "Whether to freeze the feature encoder layers of the model."} ) lowercase__ = field( default=__UpperCamelCase , metadata={"help": "Whether to generate an attention mask in the feature extractor."} ) lowercase__ = field( default=__UpperCamelCase , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) lowercase__ = field( default=__UpperCamelCase , metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) lowercase__ = field( default=__UpperCamelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def __lowerCamelCase ( self ): '''simple docstring''' if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( """The argument `--freeze_feature_extractor` is deprecated and """ """will be removed in a future version. Use `--freeze_feature_encoder`""" """instead. Setting `freeze_feature_encoder==True`.""" , __lowerCAmelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( """The argument `--freeze_feature_extractor` is deprecated and """ """should not be used in combination with `--freeze_feature_encoder`.""" """Only make use of `--freeze_feature_encoder`.""" ) def _snake_case ( ) -> str: __a : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __a , __a , __a : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __a , __a , __a : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_audio_classification""" , __snake_case , __snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() __a : Dict = training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """ + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. __a : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __a : int = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to train from scratch.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset and prepare it for the audio classification task. __a : Dict = DatasetDict() __a : int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) __a : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. """ """Make sure to set `--audio_column_name` to the correct audio column - one of """ F"""{", ".join(raw_datasets["train"].column_names )}.""" ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. """ """Make sure to set `--label_column_name` to the correct text column - one of """ F"""{", ".join(raw_datasets["train"].column_names )}.""" ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy __a : int = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
__a : int = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) __a : Dict = feature_extractor.model_input_names[0] def train_transforms(lowercase ): __a : Any = [] for audio in batch[data_args.audio_column_name]: __a : List[str] = random_subsample( audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(__snake_case ) __a : List[str] = feature_extractor(__snake_case , sampling_rate=feature_extractor.sampling_rate ) __a : Optional[int] = {model_input_name: inputs.get(__snake_case )} __a : List[Any] = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(lowercase ): __a : List[str] = [audio["""array"""] for audio in batch[data_args.audio_column_name]] __a : Optional[int] = feature_extractor(__snake_case , sampling_rate=feature_extractor.sampling_rate ) __a : Tuple = {model_input_name: inputs.get(__snake_case )} __a : Optional[Any] = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. __a : Any = raw_datasets["""train"""].features[data_args.label_column_name].names __a , __a : Optional[Any] = {}, {} for i, label in enumerate(__snake_case ): __a : List[Any] = str(__snake_case ) __a : List[Any] = label # Load the accuracy metric from the datasets package __a : str = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. def compute_metrics(lowercase ): __a : List[str] = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=__snake_case , references=eval_pred.label_ids ) __a : int = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__snake_case ) , labelaid=__snake_case , idalabel=__snake_case , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __a : Tuple = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: __a : Any = ( raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(__snake_case , output_all_columns=__snake_case ) if training_args.do_eval: if data_args.max_eval_samples is not None: __a : str = ( raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(__snake_case , output_all_columns=__snake_case ) # Initialize our trainer __a : List[str] = Trainer( model=__snake_case , args=__snake_case , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if 
training_args.do_eval else None , compute_metrics=__snake_case , tokenizer=__snake_case , ) # Training if training_args.do_train: __a : int = None if training_args.resume_from_checkpoint is not None: __a : Dict = training_args.resume_from_checkpoint elif last_checkpoint is not None: __a : Optional[int] = last_checkpoint __a : Tuple = trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __a : Union[str, Any] = trainer.evaluate() trainer.log_metrics("""eval""" , __snake_case ) trainer.save_metrics("""eval""" , __snake_case ) # Write model card and (optionally) push to hub __a : Optional[int] = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """audio-classification""", """dataset""": data_args.dataset_name, """tags""": ["""audio-classification"""], } if training_args.push_to_hub: trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) if __name__ == "__main__": main()
707
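The training transform in the script above hinges on its random_subsample helper; a standalone sketch of the cropping behaviour (synthetic waveform, assumed 16 kHz sample rate):

import numpy as np
from random import randint

def random_subsample(wav, max_length, sample_rate=16_000):
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    offset = randint(0, len(wav) - sample_length - 1)
    return wav[offset : offset + sample_length]

wav = np.zeros(5 * 16_000)               # 5 s of audio
clip = random_subsample(wav, max_length=2.0)
print(len(clip) / 16_000)                # 2.0 -> every training clip is capped at 2 s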
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ): '''simple docstring''' __a : List[str] = parent __a : Tuple = batch_size __a : str = image_size __a : int = patch_size __a : Dict = num_channels __a : int = is_training __a : Dict = use_labels __a : Union[str, Any] = hidden_size __a : Dict = num_hidden_layers __a : Dict = backbone_out_indices __a : Optional[int] = num_attention_heads __a : List[str] = intermediate_size __a : Optional[Any] = hidden_act __a : Dict = hidden_dropout_prob __a : Tuple = attention_probs_dropout_prob __a : Any = initializer_range __a : Any = num_labels __a : Optional[Any] = backbone_featmap_shape __a : List[Any] = scope __a : List[str] = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) __a : Union[str, Any] = (image_size // patch_size) ** 2 __a : List[str] = num_patches + 1 def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Union[str, Any] = None if self.use_labels: __a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __a : Tuple = self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [96, 192, 384, 768], """num_groups""": 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , ) def __lowerCamelCase ( self , 
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = DPTModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : List[str] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[str] = self.num_labels __a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : Tuple = model(__UpperCamelCase ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Dict = self.num_labels __a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : str = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.prepare_config_and_inputs() __a , __a , __a : Tuple = config_and_inputs __a : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () lowercase__ = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = DPTModelTester(self ) __a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""DPT does not use inputs_embeds""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : str = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __a : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Any = model_class(__UpperCamelCase ) __a : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : int = [*signature.parameters.keys()] __a : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase ) def 
__lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() __a : List[Any] = True if model_class in get_values(__UpperCamelCase ): continue __a : str = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.train() __a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) __a : List[Any] = model(**__UpperCamelCase ).loss loss.backward() def __lowerCamelCase ( self ): '''simple docstring''' for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __a : Any = False __a : Dict = True if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing: continue __a : Any = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.gradient_checkpointing_enable() model.train() __a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) __a : Dict = model(**__UpperCamelCase ).loss loss.backward() def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common() __a : Any = _config_zero_init(__UpperCamelCase ) for model_class in self.all_model_classes: __a : Any = model_class(config=__UpperCamelCase ) # Skip the check for the backbone __a : Optional[Any] = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": __a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: __a : int = DPTModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : int = self.model_tester.prepare_config_and_inputs_for_common() __a : Optional[int] = """add""" with self.assertRaises(__UpperCamelCase ): __a : int = DPTForDepthEstimation(__UpperCamelCase ) def _snake_case ( ) -> Any: __a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" ) __a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase ) __a : Union[str, Any] = prepare_img() __a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): __a : Optional[Any] = 
model(**__UpperCamelCase ) __a : int = outputs.predicted_depth # verify the predicted depth __a : Any = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , __UpperCamelCase ) __a : int = torch.tensor( [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) )
697
0
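The model tester above computes its expected sequence length from the ViT-style patch grid; with its defaults (image_size=32, patch_size=16) the arithmetic works out as:

image_size, patch_size = 32, 16
num_patches = (image_size // patch_size) ** 2   # (32 // 16) ** 2 = 4 patches
seq_length = num_patches + 1                    # +1 for the [CLS] token
print(seq_length)                               # 5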
'''simple docstring''' import math import qiskit def _snake_case ( lowercase = 1 , lowercase = 1 , lowercase = 1 ) -> qiskit.result.counts.Counts: if ( isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) ): raise TypeError("""inputs must be integers.""" ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError("""inputs must be positive.""" ) if ( (math.floor(UpperCAmelCase__ ) != input_a) or (math.floor(UpperCAmelCase__ ) != input_a) or (math.floor(UpperCAmelCase__ ) != carry_in) ): raise ValueError("""inputs must be exact integers.""" ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError("""inputs must be less or equal to 2.""" ) # build registers __a : Union[str, Any] = qiskit.QuantumRegister(4 , """qr""" ) __a : Optional[Any] = qiskit.ClassicalRegister(2 , """cr""" ) # list the entries __a : Tuple = [input_a, input_a, carry_in] __a : List[str] = qiskit.QuantumCircuit(UpperCAmelCase__ , UpperCAmelCase__ ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(UpperCAmelCase__ ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(UpperCAmelCase__ ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(UpperCAmelCase__ ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , UpperCAmelCase__ ) # measure the last two qbits __a : List[Any] = qiskit.Aer.get_backend("""aer_simulator""" ) __a : Union[str, Any] = qiskit.execute(UpperCAmelCase__ , UpperCAmelCase__ , shots=1_0_0_0 ) return job.result().get_counts(UpperCAmelCase__ ) if __name__ == "__main__": print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
708
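For reference, the classical truth table the quantum circuit above reproduces (the sum bit is the XOR of the three inputs, the carry-out their majority); a quick check in plain Python:

def full_adder(a, b, carry_in):
    total = a + b + carry_in
    return total % 2, total // 2       # (sum bit, carry-out bit)

for bits in [(0, 0, 0), (1, 0, 1), (1, 1, 1)]:
    print(bits, "->", full_adder(*bits))
# (0, 0, 0) -> (0, 0)
# (1, 0, 1) -> (0, 1)
# (1, 1, 1) -> (1, 1)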
'''simple docstring''' import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) __a : Optional[int] = Vector() def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Vector([1, 2, 3, 4] ) self.assertEqual(len(__UpperCamelCase ) , 4 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = Vector([1, 2] ) __a : List[str] = Vector([1, 2, 3, 4, 5] ) __a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) __a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) __a : Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = Vector([1, 2, 3] ) __a : Any = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Vector([1, 2, 3] ) __a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product __a : Union[str, Any] = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" ) self.assertEqual((a * b) , 0 ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) __a : Optional[int] = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : int = Vector([1, 0, 0, 0, 0, 0] ) __a : Any = x.copy() self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): 
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) __a : List[Any] = Vector([1, 2, 3] ) self.assertEqual("""(14,32,50)""" , str(a * x ) ) self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual( """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
697
0
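A quick cross-check of the determinant the suite above asserts (-5 for the 3x3 matrix used throughout), done here with NumPy rather than the hand-rolled Matrix class:

import numpy as np

a = np.array([[1, 2, 3], [2, 4, 5], [6, 7, 8]])
print(round(np.linalg.det(a)))  # -5, matching the determinant test above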
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE__ ( a__ , a__ ): lowercase__ = """convnextv2""" def __init__( self , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="gelu" , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-12 , __UpperCamelCase=0.0 , __UpperCamelCase=224 , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ): '''simple docstring''' super().__init__(**lowerCAmelCase__ ) __a : List[Any] = num_channels __a : str = patch_size __a : Union[str, Any] = num_stages __a : List[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes __a : str = [3, 3, 9, 3] if depths is None else depths __a : int = hidden_act __a : str = initializer_range __a : List[str] = layer_norm_eps __a : Optional[int] = drop_path_rate __a : Dict = image_size __a : Any = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] __a : Optional[Any] = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__ , out_indices=lowerCAmelCase__ , stage_names=self.stage_names )
709
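The stage_names construction in the config above is what out_features/out_indices get validated against; in isolation it behaves like this (four stages assumed, as in the default depths):

depths = [3, 3, 9, 3]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
print(stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']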
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __SCREAMING_SNAKE_CASE : List[str] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) __SCREAMING_SNAKE_CASE : Optional[Any] = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) __SCREAMING_SNAKE_CASE : Tuple = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) __SCREAMING_SNAKE_CASE : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) __SCREAMING_SNAKE_CASE : Optional[int] = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) __SCREAMING_SNAKE_CASE : int = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH 
JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) __SCREAMING_SNAKE_CASE : int = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _snake_case ( ) -> List[str]: __a , __a : List[Any] = randrange(len(lowercase ) ), randrange(len(lowercase ) ) __a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] __a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _snake_case ( lowercase = 1_0_0 ) -> Any: return (generate_random_hand() for _ in range(lowercase )) @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> int: assert PokerHand(lowercase )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Any: assert PokerHand(lowercase )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , lowercase ) def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]: __a : Union[str, Any] = PokerHand(lowercase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Optional[int]: assert PokerHand(lowercase )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Union[str, Any]: assert PokerHand(lowercase )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , lowercase ) def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]: assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def _snake_case ( lowercase , lowercase , lowercase ) -> int: assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected def _snake_case ( ) -> Union[str, Any]: __a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS] __a : Optional[int] = poker_hands.copy() shuffle(lowercase ) __a : List[str] = chain(sorted(lowercase ) ) for index, hand in enumerate(lowercase ): assert hand == poker_hands[index] def _snake_case ( ) -> List[str]: # Test that five high straights are compared correctly. __a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=lowercase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _snake_case ( ) -> List[str]: # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
__a : Dict = PokerHand("""2C 4S AS 3D 5C""" ) __a : Dict = True __a : Optional[int] = [5, 4, 3, 2, 1_4] for _ in range(1_0 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _snake_case ( ) -> Dict: # Problem number 54 from Project Euler # Testing from poker_hands.txt file __a : Tuple = 0 __a : int = os.path.abspath(os.path.dirname(lowercase ) ) __a : Union[str, Any] = os.path.join(lowercase , """poker_hands.txt""" ) with open(lowercase ) as file_hand: for line in file_hand: __a : Union[str, Any] = line[:1_4].strip() __a : Optional[Any] = line[1_5:].strip() __a , __a : List[str] = PokerHand(lowercase ), PokerHand(lowercase ) __a : str = player.compare_with(lowercase ) if output == "Win": answer += 1 assert answer == 3_7_6
697
0
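The random-hand generator above picks the expected outcome with an indexing trick: the two boolean comparisons map to 0 (loss), 1 (tie) or 2 (win).

def expected_result(play, oppo):
    # (play >= oppo) + (play > oppo) is 0 for a loss, 1 for a tie, 2 for a win
    return ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]

print(expected_result(3, 7), expected_result(5, 5), expected_result(9, 2))
# Loss Tie Win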
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE : List[str] = { '''configuration_table_transformer''': [ '''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TableTransformerConfig''', '''TableTransformerOnnxConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Any = [ '''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TableTransformerForObjectDetection''', '''TableTransformerModel''', '''TableTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_table_transformer import ( TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TableTransformerConfig, TableTransformerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_table_transformer import ( TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TableTransformerForObjectDetection, TableTransformerModel, TableTransformerPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
710
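A simplified sketch of the idea behind the _LazyModule pattern above (the module and symbol names here are hypothetical): a package __init__.py can defer heavy imports to first attribute access via PEP 562's module-level __getattr__.

# pkg/__init__.py (hypothetical package)
import importlib

_import_structure = {"pkg.modeling": ["BigModel"], "pkg.configuration": ["BigConfig"]}
_name_to_module = {n: m for m, names in _import_structure.items() for n in names}

def __getattr__(name):  # PEP 562: called for attributes not found normally
    if name in _name_to_module:
        return getattr(importlib.import_module(_name_to_module[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")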
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : List[Any] = [ 'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetBackbone', 'FocalNetModel', 'FocalNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
697
0
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ): lowercase__ = ["""image_processor""", """tokenizer"""] lowercase__ = """LayoutLMv3ImageProcessor""" lowercase__ = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""") def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , UpperCAmelCase_ , ) __a : Any = kwargs.pop("""feature_extractor""" ) __a : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(UpperCAmelCase_ , UpperCAmelCase_ ) def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) # first, apply the image processor __a : Optional[int] = self.image_processor(images=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): __a : Dict = [text] # add batch dimension (as the image processor always adds a batch dimension) __a : int = features["""words"""] __a : List[Any] = self.tokenizer( text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , ) # add pixel values __a : Any = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: __a : Optional[Any] = self.get_overflowing_images(UpperCAmelCase_ , encoded_inputs["""overflow_to_sample_mapping"""] ) __a : Any = images return 
encoded_inputs def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[int] = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" f""" {len(UpperCAmelCase_ )} and {len(UpperCAmelCase_ )}""" ) return images_with_overflow def __lowerCamelCase ( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) def __lowerCamelCase ( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ ) @property def __lowerCamelCase ( self ): '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def __lowerCamelCase ( self ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCAmelCase_ , ) return self.image_processor_class @property def __lowerCamelCase ( self ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCAmelCase_ , ) return self.image_processor
711
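How the overflow handling in the processor above keeps images aligned with overflowed token windows: overflow_to_sample_mapping repeats a sample index once per window produced by truncation with stride (toy values below):

images = ["img0", "img1"]
overflow_to_sample_mapping = [0, 0, 1]   # sample 0 was split into two windows
images_with_overflow = [images[i] for i in overflow_to_sample_mapping]
print(images_with_overflow)              # ['img0', 'img0', 'img1']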
'''simple docstring''' from __future__ import annotations import bisect def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int: if hi < 0: __a : Union[str, Any] = len(lowercase ) while lo < hi: __a : List[str] = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __a : int = mid + 1 else: __a : int = mid return lo def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int: if hi < 0: __a : Any = len(lowercase ) while lo < hi: __a : Any = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __a : List[str] = mid + 1 else: __a : Any = mid return lo def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None: sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase ) def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None: sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase ) def _snake_case ( lowercase , lowercase ) -> int | None: __a : Dict = 0 __a : Any = len(lowercase ) - 1 while left <= right: __a : str = left + (right - left) // 2 __a : List[Any] = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __a : Optional[Any] = midpoint - 1 else: __a : Optional[int] = midpoint + 1 return None def _snake_case ( lowercase , lowercase ) -> int | None: __a : Optional[int] = bisect.bisect_left(lowercase , lowercase ) if index != len(lowercase ) and sorted_collection[index] == item: return index return None def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> int | None: if right < left: return None __a : Any = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 ) else: return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip() __SCREAMING_SNAKE_CASE : Optional[Any] = sorted(int(item) for item in user_input.split(',')) __SCREAMING_SNAKE_CASE : List[str] = int(input('Enter a single number to be found in the list:\n')) __SCREAMING_SNAKE_CASE : Optional[int] = binary_search(collection, target) if result is None: print(f'''{target} was not found in {collection}.''') else: print(f'''{target} was found at position {result} in {collection}.''')
697
0
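The hand-rolled bisect_left/bisect_right above differ only in the < versus <= comparison; on duplicates that is exactly the gap the standard library exposes:

import bisect

xs = [1, 2, 2, 2, 3]
print(bisect.bisect_left(xs, 2), bisect.bisect_right(xs, 2))
# 1 4 -> the run of 2s occupies indices [1, 4)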
def _snake_case ( input_num ) -> int: if not isinstance(input_num , int ): raise ValueError("""Input must be an integer""" ) if input_num <= 0: raise ValueError("""Input must be positive""" ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
712
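A one-line check of the proper-divisor sum above against a known perfect number:

# 28 is perfect: its proper divisors 1 + 2 + 4 + 7 + 14 sum back to 28.
print(sum(d for d in range(1, 28 // 2 + 1) if 28 % d == 0))  # 28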
'''simple docstring''' from itertools import product def _snake_case ( lowercase , lowercase ) -> list[int]: __a : Optional[int] = sides_number __a : Union[str, Any] = max_face_number * dice_number __a : Optional[Any] = [0] * (max_total + 1) __a : Dict = 1 __a : str = range(lowercase , max_face_number + 1 ) for dice_numbers in product(lowercase , repeat=lowercase ): __a : int = sum(lowercase ) totals_frequencies[total] += 1 return totals_frequencies def _snake_case ( ) -> float: __a : Tuple = total_frequency_distribution( sides_number=4 , dice_number=9 ) __a : Union[str, Any] = total_frequency_distribution( sides_number=6 , dice_number=6 ) __a : str = 0 __a : Dict = 9 __a : str = 4 * 9 __a : Any = 6 for peter_total in range(lowercase , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) __a : str = (4**9) * (6**6) __a : List[Any] = peter_wins_count / total_games_number __a : List[Any] = round(lowercase , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f'''{solution() = }''')
697
0
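The frequency-distribution helper above is easiest to see on a tiny case; two 2-sided dice give the triangular counts below:

from itertools import product

freq = [0] * (2 * 2 + 1)
for roll in product(range(1, 3), repeat=2):  # two 2-sided dice
    freq[sum(roll)] += 1
print(freq)  # [0, 0, 1, 2, 1] -> totals 2, 3, 4 occur 1, 2, 1 times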
'''simple docstring''' def _snake_case ( lowercase ) -> list: __a : Any = False while is_sorted is False: # Until all the indices are traversed keep looping __a : List[str] = True for i in range(0 , len(__UpperCamelCase ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: __a , __a : int = input_list[i + 1], input_list[i] # swapping if elements not in order __a : Optional[int] = False for i in range(1 , len(__UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: __a , __a : Union[str, Any] = input_list[i + 1], input_list[i] # swapping if elements not in order __a : List[str] = False return input_list if __name__ == "__main__": print('Enter list to be sorted') __SCREAMING_SNAKE_CASE : Optional[int] = [int(x) for x in input().split()] # inputing elements of the list in one line __SCREAMING_SNAKE_CASE : Tuple = odd_even_sort(input_list) print('The sorted list is') print(sorted_list)
713
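A compact, self-contained restatement of the brick-sort loop above (same alternating even/odd passes, repeated until a full pass makes no swap):

def brick_sort(xs):
    xs = list(xs)
    done = False
    while not done:
        done = True
        for start in (0, 1):                      # even pass, then odd pass
            for i in range(start, len(xs) - 1, 2):
                if xs[i] > xs[i + 1]:
                    xs[i], xs[i + 1] = xs[i + 1], xs[i]
                    done = False
    return xs

print(brick_sort([5, 3, 8, 1, 9, 2]))  # [1, 2, 3, 5, 8, 9]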
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' super().__init__() self.register_modules( vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , ) def __lowerCamelCase ( self , __UpperCamelCase = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __a : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' self.enable_attention_slicing(__UpperCamelCase ) @torch.no_grad() def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' if isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Union[str, Any] = 1 elif isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Tuple = len(__UpperCamelCase ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__UpperCamelCase )}.""" ) # get prompt text embeddings __a : Tuple = self.tokenizer( __UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) __a : Union[str, Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) __a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: __a : int = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __a 
, __a , __a : Union[str, Any] = text_embeddings.shape __a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 ) __a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __a : Any = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __a : List[str] if negative_prompt is None: __a : Optional[Any] = [""""""] elif type(__UpperCamelCase ) is not type(__UpperCamelCase ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !=""" f""" {type(__UpperCamelCase )}.""" ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Any = [negative_prompt] elif batch_size != len(__UpperCamelCase ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" """ the batch size of `prompt`.""" ) else: __a : Tuple = negative_prompt __a : Any = text_input_ids.shape[-1] __a : List[str] = self.tokenizer( __UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , ) __a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __a : List[str] = uncond_embeddings.shape[1] __a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 ) __a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) __a : int = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __a : Any = torch.randn( __UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device ) __a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to( self.device ) else: __a : Optional[int] = torch.randn( __UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) __a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) else: if latents_reference.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __a : Optional[Any] = latents_reference.to(self.device ) __a : str = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images __a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2 __a : int = (latents_shape[2] - latents_shape_reference[2]) // 2 __a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx __a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy __a : Optional[Any] = 0 if dx < 0 else dx __a : Optional[Any] = 0 if dy < 0 else dy __a : Optional[int] = max(-dx , 0 ) __a : Optional[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() __a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(__UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __a : Dict = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __a : Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __a : Optional[Any] = {} if accepts_eta: __a : Union[str, Any] = eta for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance __a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase ) # predict the noise residual __a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample # perform guidance if do_classifier_free_guidance: __a , __a : List[str] = noise_pred.chunk(2 ) __a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents __a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample __a : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: __a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to( self.device ) __a , __a : int = self.safety_checker( images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: __a : Optional[int] = None if output_type == "pil": __a : str = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
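# The guidance arithmetic in the denoising loop above is easy to check in
# isolation. A minimal sketch follows; the tensor shapes and the
# guidance_scale value are illustrative assumptions, not values taken from
# the pipeline.
import torch

noise_pred = torch.randn(2, 4, 64, 64)  # stacked [unconditional, text] batch
guidance_scale = 7.5                    # illustrative value

noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred.shape)  # torch.Size([1, 4, 64, 64])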
697
0
'''simple docstring''' import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE__ : @staticmethod def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' pass @is_pipeline_test @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): lowercase__ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[Any] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" ) __a : List[str] = [ { """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """question""": """How many cats are there?""", }, { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """question""": """How many cats are there?""", }, ] return vqa_pipeline, examples def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = vqa_pipeline(__UpperCamelCase , top_k=1 ) self.assertEqual( __UpperCamelCase , [ [{"""score""": ANY(__UpperCamelCase ), """answer""": ANY(__UpperCamelCase )}], [{"""score""": ANY(__UpperCamelCase ), """answer""": ANY(__UpperCamelCase )}], ] , ) @require_torch def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" ) __a : List[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png""" __a : Tuple = """How many cats are there?""" __a : int = vqa_pipeline(image=__UpperCamelCase , question="""How many cats are there?""" , top_k=2 ) self.assertEqual( __UpperCamelCase , [{"""score""": ANY(__UpperCamelCase ), """answer""": ANY(__UpperCamelCase )}, {"""score""": ANY(__UpperCamelCase ), """answer""": ANY(__UpperCamelCase )}] ) __a : List[str] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( __UpperCamelCase , [{"""score""": ANY(__UpperCamelCase ), """answer""": ANY(__UpperCamelCase )}, {"""score""": ANY(__UpperCamelCase ), """answer""": ANY(__UpperCamelCase )}] ) @slow @require_torch def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" ) __a : List[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png""" __a : Dict = """How many cats are there?""" __a : Union[str, Any] = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}] ) __a : Optional[int] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}] ) __a : Optional[int] = vqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"""score""": 0.8_7_9_9, 
"""answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}]] * 2 , ) @require_tf @unittest.skip("""Visual question answering not implemented in TF""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass
714
'''Max and average pooling over square matrices.'''
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a size x size window over arr with the given stride, keeping the maximum."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a size x size window over arr with the given stride, keeping the average."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
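# A hand-checkable example of both functions on a 4x4 matrix with a 2x2
# window and stride 2.
import numpy as np

example = np.arange(1, 17).reshape(4, 4)  # rows 1..4, 5..8, 9..12, 13..16
print(maxpooling(example, size=2, stride=2))
# [[ 6.  8.]
#  [14. 16.]]
print(avgpooling(example, size=2, stride=2))
# [[ 3.  5.]
#  [11. 13.]]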
697
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) def _snake_case ( lowercase ) -> List[Any]: # initialize config if "resnet-50" in model_name: __a : List[Any] = ResNetConfig.from_pretrained("""microsoft/resnet-50""" ) elif "resnet-101" in model_name: __a : Optional[int] = ResNetConfig.from_pretrained("""microsoft/resnet-101""" ) else: raise ValueError("""Model name should include either resnet50 or resnet101""" ) __a : List[str] = DetrConfig(use_timm_backbone=SCREAMING_SNAKE_CASE_ , backbone_config=SCREAMING_SNAKE_CASE_ ) # set label attributes __a : List[str] = """panoptic""" in model_name if is_panoptic: __a : Dict = 2_5_0 else: __a : str = 9_1 __a : Dict = """huggingface/label-files""" __a : Optional[int] = """coco-detection-id2label.json""" __a : Optional[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" ) , """r""" ) ) __a : Union[str, Any] = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} __a : Union[str, Any] = idalabel __a : Union[str, Any] = {v: k for k, v in idalabel.items()} return config, is_panoptic def _snake_case ( lowercase ) -> Any: # here we list all keys to be renamed (original name on the left, our name on the right) __a : str = [] # stem # fmt: off rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") ) rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") ) rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") ) rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") ) rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""", 
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") ) 
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) return rename_keys def _snake_case ( lowercase , lowercase , lowercase ) -> Tuple: __a : Optional[Any] = state_dict.pop(SCREAMING_SNAKE_CASE_ ) __a : Optional[Any] = val def _snake_case ( lowercase , lowercase=False ) -> str: __a : List[Any] = """""" if is_panoptic: __a : Any = """detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) __a : Optional[int] = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) __a : Tuple = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict __a : List[Any] = in_proj_weight[:2_5_6, :] __a : Dict = in_proj_bias[:2_5_6] __a : Tuple = in_proj_weight[2_5_6:5_1_2, :] __a : int = in_proj_bias[2_5_6:5_1_2] __a : str = in_proj_weight[-2_5_6:, :] __a : Tuple = in_proj_bias[-2_5_6:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention __a : str = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) __a : Union[str, Any] = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict __a : List[Any] = in_proj_weight[:2_5_6, :] __a : Union[str, Any] = in_proj_bias[:2_5_6] __a : str = 
in_proj_weight[2_5_6:5_1_2, :] __a : List[str] = in_proj_bias[2_5_6:5_1_2] __a : List[str] = in_proj_weight[-2_5_6:, :] __a : str = in_proj_bias[-2_5_6:] # read in weights + bias of input projection layer of cross-attention __a : List[str] = state_dict.pop( F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) __a : Dict = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict __a : List[str] = in_proj_weight_cross_attn[:2_5_6, :] __a : Tuple = in_proj_bias_cross_attn[:2_5_6] __a : List[Any] = in_proj_weight_cross_attn[2_5_6:5_1_2, :] __a : Optional[Any] = in_proj_bias_cross_attn[2_5_6:5_1_2] __a : List[str] = in_proj_weight_cross_attn[-2_5_6:, :] __a : Dict = in_proj_bias_cross_attn[-2_5_6:] def _snake_case ( ) -> int: __a : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" __a : List[str] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def _snake_case ( lowercase , lowercase=None , lowercase=False ) -> int: __a , __a : Optional[Any] = get_detr_config(SCREAMING_SNAKE_CASE_ ) # load original model from torch hub __a : Optional[Any] = { """detr-resnet-50""": """detr_resnet50""", """detr-resnet-101""": """detr_resnet101""", } logger.info(F"""Converting model {model_name}...""" ) __a : str = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=SCREAMING_SNAKE_CASE_ ).eval() __a : List[Any] = detr.state_dict() # rename keys for src, dest in create_rename_keys(SCREAMING_SNAKE_CASE_ ): if is_panoptic: __a : Dict = """detr.""" + src rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # query, key and value matrices need special treatment read_in_q_k_v(SCREAMING_SNAKE_CASE_ , is_panoptic=SCREAMING_SNAKE_CASE_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them __a : Optional[Any] = """detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): __a : List[str] = state_dict.pop(SCREAMING_SNAKE_CASE_ ) __a : Dict = val elif "class_labels_classifier" in key or "bbox_predictor" in key: __a : Optional[Any] = state_dict.pop(SCREAMING_SNAKE_CASE_ ) __a : Any = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: __a : Dict = state_dict.pop(SCREAMING_SNAKE_CASE_ ) __a : Union[str, Any] = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): __a : Any = state_dict.pop(SCREAMING_SNAKE_CASE_ ) __a : List[Any] = val # finally, create HuggingFace model and load state dict __a : Optional[Any] = DetrForSegmentation(SCREAMING_SNAKE_CASE_ ) if is_panoptic else DetrForObjectDetection(SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) model.eval() # verify our conversion on an image __a : Tuple = """coco_panoptic""" if is_panoptic else """coco_detection""" __a : int = DetrImageProcessor(format=SCREAMING_SNAKE_CASE_ ) __a : Optional[int] = processor(images=prepare_img() , return_tensors="""pt""" ) __a : Union[str, Any] = encoding["""pixel_values"""] __a : Union[str, Any] = detr(SCREAMING_SNAKE_CASE_ ) __a : Tuple = model(SCREAMING_SNAKE_CASE_ ) 
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: # Upload model and image processor to the hub logger.info("""Uploading PyTorch model and image processor to the hub...""" ) model.push_to_hub(F"""nielsr/{model_name}""" ) processor.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') __SCREAMING_SNAKE_CASE : List[str] = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
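# The in_proj splitting performed by read_in_q_k_v above is the least obvious
# step, so here it is in isolation. The hidden size of 256 matches the slicing
# constants used above; the tensor content is random.
import torch

# torch.nn.MultiheadAttention stores the query/key/value projections as one
# stacked (3 * hidden, hidden) matrix; the HF model keeps them separate.
hidden = 256
in_proj_weight = torch.randn(3 * hidden, hidden)

q_weight = in_proj_weight[:hidden, :]
k_weight = in_proj_weight[hidden : 2 * hidden, :]
v_weight = in_proj_weight[-hidden:, :]
print(q_weight.shape, k_weight.shape, v_weight.shape)  # three (256, 256) slices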
715
'''Single qubit measurement on the Aer simulator.'''
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Build a circuit, measure qubit 0 into classical bit 0 and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
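# The same pattern extends to multi-qubit circuits. A sketch preparing a Bell
# state, assuming a qiskit version that still ships qiskit.Aer and
# qiskit.execute, as the module above does.
import qiskit

backend = qiskit.Aer.get_backend("aer_simulator")
circuit = qiskit.QuantumCircuit(2, 2)
circuit.h(0)      # put qubit 0 into an equal superposition
circuit.cx(0, 1)  # entangle qubit 1 with qubit 0
circuit.measure([0, 1], [0, 1])
job = qiskit.execute(circuit, backend, shots=1_000)
print(job.result().get_counts(circuit))  # roughly {'00': ~500, '11': ~500}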
697
0
'''Fast tests for the IF image-to-image super-resolution pipeline.'''
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
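# The device-dependent generator setup in get_dummy_inputs is reusable on its
# own. A sketch; the helper name is made up for illustration.
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS does not support per-device generators, so fall back to seeding the
    # global CPU generator, which torch.manual_seed returns.
    if device.startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

generator = make_generator("cpu", seed=0)
print(torch.rand(2, generator=generator))  # reproducible for a fixed seed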
716
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Any = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k', 'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v', 'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q', 'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u', 'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v', 'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out', 'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos', 'self_attn.rotary_emb': 'encoder.embed_positions', 'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm', 'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1', 'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2', 'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv', 'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm', 'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm', 'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense', 'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense', 'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm', 'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense', 'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense', 'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __SCREAMING_SNAKE_CASE : Optional[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: for attribute in key.split(""".""" ): __a : str = getattr(lowercase , lowercase ) if weight_type is not None: __a : Dict = getattr(lowercase , lowercase ).shape else: __a : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a : Any = value elif weight_type == "weight_g": __a : int = value elif weight_type == "weight_v": __a : int = value elif weight_type == "bias": __a : List[Any] = value elif weight_type == "running_mean": __a : Union[str, Any] = value elif weight_type == "running_var": __a : Tuple = value elif weight_type == "num_batches_tracked": __a : Optional[int] = value elif weight_type == "inv_freq": __a : List[str] = value else: __a : List[str] = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( lowercase , lowercase , lowercase ) -> Dict: __a : Dict = [] __a : Dict = fairseq_model.state_dict() __a : Tuple = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __a : int = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , ) __a : List[Any] = True else: for key, mapped_key in MAPPING.items(): __a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __a : str = True if "*" in mapped_key: __a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2] __a : List[Any] = mapped_key.replace("""*""" , lowercase ) if "pos_bias_u" in name: __a : Union[str, Any] = None elif "pos_bias_v" in name: __a : List[Any] = None elif "weight_g" in name: __a : List[Any] = """weight_g""" elif "weight_v" in name: __a : List[Any] = """weight_v""" elif "bias" in name: __a : Optional[int] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __a : str = """weight""" elif "running_mean" in name: __a : List[str] = """running_mean""" elif "inv_freq" in name: __a : Dict = """inv_freq""" elif "running_var" in name: __a : Union[str, Any] = """running_var""" elif "num_batches_tracked" in name: __a : int = """num_batches_tracked""" else: __a : Optional[int] = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]: __a : Optional[Any] = full_name.split("""conv_layers.""" )[-1] __a : Union[str, Any] = name.split(""".""" ) __a : Optional[Any] = int(items[0] ) __a : int = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a : Dict = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __a : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __a : Union[str, Any] = value logger.info(F"""Feat 
extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase ) @torch.no_grad() def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]: if config_path is not None: __a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" ) else: __a : Optional[int] = WavaVecaConformerConfig() if "rope" in checkpoint_path: __a : Optional[Any] = """rotary""" if is_finetuned: if dict_path: __a : List[Any] = Dictionary.load(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __a : int = target_dict.pad_index __a : List[str] = target_dict.bos_index __a : str = target_dict.eos_index __a : Dict = len(target_dict.symbols ) __a : Any = os.path.join(lowercase , """vocab.json""" ) if not os.path.isdir(lowercase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) __a : Dict = target_dict.indices # fairseq has the <pad> and <s> switched __a : Optional[Any] = 0 __a : List[Any] = 1 with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowercase , lowercase ) __a : int = WavaVecaCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , ) __a : Optional[int] = True if config.feat_extract_norm == """layer""" else False __a : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , ) __a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) __a : List[str] = WavaVecaConformerForCTC(lowercase ) else: __a : Optional[int] = WavaVecaConformerForPreTraining(lowercase ) if is_finetuned: __a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" ) __a : Tuple = fairseq.tasks.setup_task(lowercase ) __a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase ) __a : Any = model[0].eval() recursively_load_weights(lowercase , lowercase , not is_finetuned ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
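# The "*" wildcard handling inside recursively_load_weights above is the
# subtle part, so here it is in isolation; the parameter name is a made-up
# example in the fairseq naming scheme.
name = "encoder.layers.3.self_attn.linear_q.weight"
key = "self_attn.linear_q"
mapped_key = "encoder.layers.*.self_attn.linear_q"

# The layer index parsed from the fairseq name fills the "*" slot.
layer_index = name.split(key)[0].split(".")[-2]
print(mapped_key.replace("*", layer_index))  # encoder.layers.3.self_attn.linear_q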
697
0
'''simple docstring''' import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging __SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ): lowercase__ = """linear""" lowercase__ = """cosine""" lowercase__ = """cosine_with_restarts""" lowercase__ = """polynomial""" lowercase__ = """constant""" lowercase__ = """constant_with_warmup""" lowercase__ = """piecewise_constant""" def _snake_case ( lowercase , lowercase = -1 ) -> List[Any]: return LambdaLR(lowercase , lambda lowercase : 1 , last_epoch=lowercase ) def _snake_case ( lowercase , lowercase , lowercase = -1 ) -> List[str]: def lr_lambda(lowercase ): if current_step < num_warmup_steps: return float(lowercase ) / float(max(1.0 , lowercase ) ) return 1.0 return LambdaLR(lowercase , lowercase , last_epoch=lowercase ) def _snake_case ( lowercase , lowercase , lowercase = -1 ) -> Dict: __a : Union[str, Any] = {} __a : Optional[int] = step_rules.split(""",""" ) for rule_str in rule_list[:-1]: __a , __a : Tuple = rule_str.split(""":""" ) __a : Optional[Any] = int(lowercase ) __a : Any = float(lowercase ) __a : Dict = value __a : Union[str, Any] = float(rule_list[-1] ) def create_rules_function(lowercase , lowercase ): def rule_func(lowercase ) -> float: __a : Optional[int] = sorted(rules_dict.keys() ) for i, sorted_step in enumerate(lowercase ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func __a : List[str] = create_rules_function(lowercase , lowercase ) return LambdaLR(lowercase , lowercase , last_epoch=lowercase ) def _snake_case ( lowercase , lowercase , lowercase , lowercase=-1 ) -> List[str]: def lr_lambda(lowercase ): if current_step < num_warmup_steps: return float(lowercase ) / float(max(1 , lowercase ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(lowercase , lowercase , lowercase ) def _snake_case ( lowercase , lowercase , lowercase , lowercase = 0.5 , lowercase = -1 ) -> Optional[Any]: def lr_lambda(lowercase ): if current_step < num_warmup_steps: return float(lowercase ) / float(max(1 , lowercase ) ) __a : int = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowercase ) * 2.0 * progress )) ) return LambdaLR(lowercase , lowercase , lowercase ) def _snake_case ( lowercase , lowercase , lowercase , lowercase = 1 , lowercase = -1 ) -> str: def lr_lambda(lowercase ): if current_step < num_warmup_steps: return float(lowercase ) / float(max(1 , lowercase ) ) __a : Any = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowercase ) * progress) % 1.0) )) ) return LambdaLR(lowercase , lowercase , lowercase ) def _snake_case ( lowercase , lowercase , lowercase , lowercase=1E-7 , lowercase=1.0 , lowercase=-1 ) -> Optional[Any]: __a : List[Any] = optimizer.defaults["""lr"""] if not (lr_init > lr_end): raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" ) def lr_lambda(lowercase ): if current_step < num_warmup_steps: return float(lowercase ) / float(max(1 , lowercase ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by 
lr_init else: __a : List[Any] = lr_init - lr_end __a : Dict = num_training_steps - num_warmup_steps __a : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps __a : int = lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(lowercase , lowercase , lowercase ) __SCREAMING_SNAKE_CASE : Any = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def _snake_case ( lowercase , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = 1 , lowercase = 1.0 , lowercase = -1 , ) -> int: __a : str = SchedulerType(lowercase ) __a : Optional[Any] = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(lowercase , last_epoch=lowercase ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(lowercase , step_rules=lowercase , last_epoch=lowercase ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(lowercase , num_warmup_steps=lowercase , last_epoch=lowercase ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( lowercase , num_warmup_steps=lowercase , num_training_steps=lowercase , num_cycles=lowercase , last_epoch=lowercase , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( lowercase , num_warmup_steps=lowercase , num_training_steps=lowercase , power=lowercase , last_epoch=lowercase , ) return schedule_func( lowercase , num_warmup_steps=lowercase , num_training_steps=lowercase , last_epoch=lowercase )
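# The linear schedule is the easiest to sanity-check by hand. This sketch
# evaluates its multiplier directly; the warmup and training step counts are
# illustrative.
num_warmup_steps, num_training_steps = 10, 100

def lr_lambda(current_step: int) -> float:
    # Ramp linearly from 0 to 1 during warmup, then decay linearly back to 0.
    if current_step < num_warmup_steps:
        return current_step / max(1, num_warmup_steps)
    return max(
        0.0, (num_training_steps - current_step) / max(1, num_training_steps - num_warmup_steps)
    )

print([round(lr_lambda(s), 2) for s in (0, 5, 10, 55, 100)])  # [0.0, 0.5, 1.0, 0.5, 0.0]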
717
'''Decorator that flags a function as experimental.'''
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    """Warn on every call that ``fn`` may change or be removed without notice."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
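# Hypothetical usage of the decorator above: the wrapped function works as
# normal but emits a UserWarning on every call.
@experimental
def double(x: int) -> int:
    return x * 2

print(double(3))  # prints 6 after warning that 'double' is experimental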
697
0
'''Minimax over a perfect binary game tree stored as a list of leaf scores.'''
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score reachable from ``node_index`` at ``depth``."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
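# A hand-checkable run on a depth-2 tree: the two minimiser nodes yield
# min(3, 5) = 3 and min(2, 9) = 2, so the maximiser at the root picks 3.
import math

scores = [3, 5, 2, 9]
height = math.log(len(scores), 2)  # 2.0
print(minimax(0, 0, True, scores, height))  # 3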
718
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = ["input_features", "attention_mask"] def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ): '''simple docstring''' super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase ) __a : List[str] = feature_size __a : List[str] = sampling_rate __a : int = padding_value __a : Any = hop_length __a : int = win_length __a : Tuple = frame_signal_scale __a : Union[str, Any] = preemphasis_coeff __a : List[str] = mel_floor __a : Union[str, Any] = normalize_means __a : Optional[Any] = normalize_vars __a : Optional[Any] = win_function __a : Union[str, Any] = return_attention_mask __a : List[Any] = win_length * sampling_rate // 1000 __a : List[Any] = hop_length * sampling_rate // 1000 __a : Optional[Any] = optimal_fft_length(self.sample_size ) __a : Any = (self.n_fft // 2) + 1 def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' if self.win_function == "hamming_window": __a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase ) else: __a : Dict = window_function(window_length=self.sample_size , name=self.win_function ) __a : Optional[Any] = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) __a : Any = spectrogram( one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , ) return msfc_features.T def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if self.normalize_means: __a : int = x[:input_length].mean(axis=0 ) __a : str = np.subtract(__UpperCamelCase , __UpperCamelCase ) if self.normalize_vars: __a : Dict = x[:input_length].std(axis=0 ) __a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase ) if input_length < x.shape[0]: __a : Union[str, Any] = padding_value # make sure array is in float32 __a : Any = x.astype(np.floataa ) return x def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' __a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )] def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , 
__UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) __a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __a : Tuple = is_batched_numpy or ( isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ): __a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa ) elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __a : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __a : Any = [raw_speech] # extract fbank features __a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech] # convert into correct format for padding __a : Optional[Any] = BatchFeature({"""input_features""": features} ) __a : Any = self.pad( __UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) # make sure list is in array format __a : int = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] , __UpperCamelCase ): __a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features] __a : List[str] = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: __a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: __a : Optional[Any] = ( np.array(__UpperCamelCase , dtype=np.intaa ) if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) __a : int = self.normalize( padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase ) if return_tensors is not None: __a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
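# The per-utterance normalisation applied by _normalize_one can be checked in
# isolation. The shapes are made up: 80 mel bins, padding after frame 80.
import numpy as np

features = np.random.rand(100, 80).astype(np.float32)  # (frames, mel bins)
input_length = 80                                      # valid frames before padding

# Statistics come from the valid frames only, exactly as in _normalize_one.
features -= features[:input_length].mean(axis=0)
features /= features[:input_length].std(axis=0)
print(features[:input_length].mean(axis=0).round(5)[:3])  # ~[0. 0. 0.]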
697
0
'''Dump information about the environment for bug reports.'''
import os
import sys

import transformers

# Silence TensorFlow's C++ logging before it is imported below (the intended
# target of this assignment in the upstream script).
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
719
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = 9.80_665


def _snake_case ( lowercase , lowercase , lowercase = g ) -> float:
    if fluid_density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if volume < 0:
        raise ValueError("""Impossible Object volume""" )
    if gravity <= 0:
        raise ValueError("""Impossible Gravity""" )
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
697
0
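The physics snippet above computes the buoyant force from Archimedes' principle, F = rho * g * V. A de-mangled sketch with a worked value; the readable names here are ours, the source keeps everything under `_snake_case`/`lowercase`:

GRAVITY = 9.80665  # standard gravity, m/s^2


def buoyant_force(fluid_density: float, volume: float, gravity: float = GRAVITY) -> float:
    # F = rho * g * V, with the same guard conditions as the block above
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible object volume")
    if gravity <= 0:
        raise ValueError("Impossible gravity")
    return fluid_density * gravity * volume


# Fresh water (~997 kg/m^3) displacing half a cubic metre:
print(buoyant_force(997, 0.5))  # ~4888.6 N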
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class SCREAMING_SNAKE_CASE__ ( __lowercase ): lowercase__ = ['''image_processor''', '''tokenizer'''] lowercase__ = '''AutoImageProcessor''' lowercase__ = '''AutoTokenizer''' def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase ): '''simple docstring''' __a : int = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __a , ) __a : str = kwargs.pop("""feature_extractor""" ) __a : List[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__a , __a ) __a : Any = self.image_processor __a : Any = False def __call__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*__a , **__a ) __a : List[Any] = kwargs.pop("""images""" , __a ) __a : List[str] = kwargs.pop("""text""" , __a ) if len(__a ) > 0: __a : Any = args[0] __a : List[Any] = args[1:] if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: __a : str = self.image_processor(__a , *__a , **__a ) if text is not None: __a : Union[str, Any] = self.tokenizer(__a , **__a ) if text is None: return inputs elif images is None: return encodings else: __a : List[Any] = encodings["""input_ids"""] return inputs def __lowerCamelCase ( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__a , **__a ) def __lowerCamelCase ( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' return self.tokenizer.decode(*__a , **__a ) @contextmanager def __lowerCamelCase ( self ): '''simple docstring''' warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your images inputs, or in a separate call.""" ) __a : List[str] = True __a : int = self.tokenizer yield __a : str = self.image_processor __a : Union[str, Any] = False def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=None ): '''simple docstring''' if added_vocab is None: __a : Optional[Any] = self.tokenizer.get_added_vocab() __a : Dict = {} while tokens: __a : Union[str, Any] = re.search(r"""<s_(.*?)>""" , __a , re.IGNORECASE ) if start_token is None: break __a : List[Any] = start_token.group(1 ) __a : Optional[int] = re.search(rf"""</s_{key}>""" , __a , re.IGNORECASE ) __a : int = start_token.group() if end_token is None: __a : Any = tokens.replace(__a , """""" ) else: __a : Any = end_token.group() __a : Tuple = re.escape(__a ) __a : Optional[Any] = re.escape(__a ) __a : Dict = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __a , re.IGNORECASE ) if content is not None: __a : List[str] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node __a : Any = self.tokenajson(__a , is_inner_value=__a , added_vocab=__a ) if value: if len(__a ) == 1: __a : str = value[0] __a : List[Any] = value else: # leaf nodes __a : List[Any] = [] for leaf in content.split(r"""<sep/>""" ): __a : Optional[Any] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": __a : Optional[Any] = leaf[1:-2] # for categorical special tokens output[key].append(__a ) if len(output[key] ) == 1: __a : Optional[Any] = output[key][0] __a : Tuple = tokens[tokens.find(__a ) + len(__a ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=__a , added_vocab=__a ) if len(__a ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def __lowerCamelCase ( self ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __a , ) return self.image_processor_class @property def __lowerCamelCase ( self ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __a , ) return self.image_processor
720
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ): '''simple docstring''' __a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} __a : Dict = parent __a : Union[str, Any] = batch_size __a : Optional[int] = num_channels __a : Dict = min_resolution __a : List[Any] = max_resolution __a : int = do_resize __a : str = size __a : Optional[Any] = do_rescale __a : Optional[Any] = rescale_factor __a : str = do_normalize __a : Any = image_mean __a : Optional[Any] = image_std __a : Dict = do_pad def __lowerCamelCase ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ): '''simple docstring''' if not batched: __a : Union[str, Any] = image_inputs[0] if isinstance(__UpperCamelCase , Image.Image ): __a , __a : Tuple = image.size else: __a , __a : Tuple = image.shape[1], image.shape[2] if w < h: __a : Optional[int] = int(self.size["""shortest_edge"""] * h / w ) __a : Tuple = self.size["""shortest_edge"""] elif w > h: __a : Optional[Any] = self.size["""shortest_edge"""] __a : Any = int(self.size["""shortest_edge"""] * w / h ) else: __a : Any = self.size["""shortest_edge"""] __a : Optional[int] = self.size["""shortest_edge"""] else: __a : Any = [] for image in image_inputs: __a , __a : Any = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0] __a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ): lowercase__ = DetrImageProcessor if is_vision_available() else None def __lowerCamelCase ( self ): '''simple docstring''' __a : str = DetrImageProcessingTester(self ) @property def __lowerCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) ) self.assertTrue(hasattr(__UpperCamelCase , 
"""do_resize""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """size""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , __UpperCamelCase ) __a : List[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , Image.Image ) # Test not batched input __a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) __a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , np.ndarray ) # Test not batched input __a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values __a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , torch.Tensor ) # Test not batched input __a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( 
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: __a : Dict = json.loads(f.read() ) __a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target} # encode them __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" ) __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __a : Union[str, Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase ) __a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify area __a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) ) # verify boxes __a : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase ) __a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) ) # verify image_id __a : Union[str, Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) ) # verify is_crowd __a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) ) # verify class_labels __a : Any = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) ) # verify orig_size __a : Any = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) ) # verify size __a : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: __a : Tuple = json.loads(f.read() ) __a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} __a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" ) __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __a : List[str] = torch.Size([1, 3, 800, 1066] 
) self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase ) __a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify area __a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) ) # verify boxes __a : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase ) __a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) ) # verify image_id __a : List[str] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) ) # verify is_crowd __a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) ) # verify class_labels __a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) ) # verify masks __a : Union[str, Any] = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase ) # verify orig_size __a : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) ) # verify size __a : List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
697
0
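The `AutoImageProcessor`/`AutoTokenizer` processor defined earlier in this row mirrors `DonutProcessor`: its `tokenajson` method (named `token2json` upstream) turns `<s_key>...</s_key>`-tagged generations into nested JSON, recursing on inner tags and splitting leaf values on `<sep/>`. A usage sketch under that assumption; the checkpoint id is the standard Donut base model, used only as an example:

from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
sequence = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
print(processor.token2json(sequence))
# expected: {'menu': {'name': 'Latte', 'price': '4.50'}}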
'''simple docstring''' from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES __SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Dict = OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) __SCREAMING_SNAKE_CASE : int = OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __SCREAMING_SNAKE_CASE : Any = OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) __SCREAMING_SNAKE_CASE : int = OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 
'FlaxViTForImageClassification'), ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), ] ) __SCREAMING_SNAKE_CASE : Optional[int] = OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) __SCREAMING_SNAKE_CASE : List[Any] = OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) __SCREAMING_SNAKE_CASE : Dict = OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) __SCREAMING_SNAKE_CASE : List[str] = OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) __SCREAMING_SNAKE_CASE : Tuple = OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) __SCREAMING_SNAKE_CASE : int = OrderedDict( [ ('whisper', 
'FlaxWhisperForAudioClassification'), ] ) __SCREAMING_SNAKE_CASE : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) __SCREAMING_SNAKE_CASE : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) __SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) __SCREAMING_SNAKE_CASE : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) __SCREAMING_SNAKE_CASE : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) __SCREAMING_SNAKE_CASE : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_MAPPING __SCREAMING_SNAKE_CASE : Dict = auto_class_update(FlaxAutoModel) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING __SCREAMING_SNAKE_CASE : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING __SCREAMING_SNAKE_CASE : str = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING __SCREAMING_SNAKE_CASE : str = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __SCREAMING_SNAKE_CASE : Dict = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __SCREAMING_SNAKE_CASE : Tuple = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING __SCREAMING_SNAKE_CASE : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __SCREAMING_SNAKE_CASE : Dict = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class SCREAMING_SNAKE_CASE__ ( 
_BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING __SCREAMING_SNAKE_CASE : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING __SCREAMING_SNAKE_CASE : Any = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING __SCREAMING_SNAKE_CASE : str = auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING __SCREAMING_SNAKE_CASE : Any = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): lowercase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING __SCREAMING_SNAKE_CASE : List[Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
721
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING) __SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() if args.tokenizer_name: __SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size __SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine' if args.fp16: __SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine' if args.int8: __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)] __SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __SCREAMING_SNAKE_CASE : Tuple = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fp16: config.set_flag(trt.BuilderFlag.FP16) if args.int8: config.set_flag(trt.BuilderFlag.INT8) __SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: __a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) __a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) __a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase ) # start time __a : Optional[Any] = time.time() # Run inference context.execute_async( bindings=[int(d_inp) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase ) cuda.memcpy_dtoh_async(lowercase , 
lowercase , lowercase ) # Synchronize the stream and take time stream.synchronize() # end time __a : str = time.time() __a : Any = end_time - start_time __a : Optional[int] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names __SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0] __SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1] __SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length) def _snake_case ( lowercase ) -> Tuple: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace __a : Optional[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. 
This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. __a : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. __a : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. __a : Optional[Any] = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). __a : Dict = tokenized_examples.sequence_ids(lowercase ) __a : Optional[Any] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. __a : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. __a : int = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'] # Validation Feature Creation __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __SCREAMING_SNAKE_CASE : List[Any] = default_data_collator __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __SCREAMING_SNAKE_CASE : List[str] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def _snake_case ( lowercase , lowercase , lowercase , lowercase="eval" ) -> Any: # Post-processing: we match the start logits and end logits to answers in the original context. __a : List[str] = postprocess_qa_predictions( examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: __a : List[str] = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: __a : List[str] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] __a : Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowercase , label_ids=lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def _snake_case ( lowercase ) -> Optional[int]: return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize # Allocate device memory for inputs and outputs. __SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __SCREAMING_SNAKE_CASE : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : str = cuda.mem_alloc(h_outputa.nbytes) __SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __SCREAMING_SNAKE_CASE : Tuple = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') __SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0 __SCREAMING_SNAKE_CASE : str = 0 __SCREAMING_SNAKE_CASE : str = timeit.default_timer() __SCREAMING_SNAKE_CASE : Dict = None for step, batch in enumerate(eval_dataloader): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = outputs __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(start_logits) __SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __SCREAMING_SNAKE_CASE : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __SCREAMING_SNAKE_CASE : Tuple = nested_truncate(all_preds, len(eval_dataset)) __SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} 
ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) __SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds) __SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
697
0
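The `FLAX_MODEL_*_MAPPING_NAMES` tables defined above feed `_LazyAutoMapping`, so each `FlaxAutoModel*` class resolves a config's model type to the matching concrete Flax class lazily at load time. A minimal sketch; it assumes `flax` is installed and that the checkpoint ships Flax weights (otherwise pass `from_pt=True`):

from transformers import FlaxAutoModel

# "bert" appears in FLAX_MODEL_MAPPING_NAMES above, so this resolves to FlaxBertModel.
model = FlaxAutoModel.from_pretrained("bert-base-uncased")
print(type(model).__name__)  # FlaxBertModel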
'''simple docstring''' import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=64 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=64 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=4 , __UpperCamelCase=None , ): '''simple docstring''' __a : Union[str, Any] = parent __a : List[Any] = batch_size __a : Any = seq_length __a : Optional[Any] = is_training __a : Union[str, Any] = use_input_mask __a : Optional[Any] = use_token_type_ids __a : List[str] = use_labels __a : Dict = vocab_size __a : str = hidden_size __a : List[str] = num_hidden_layers __a : Dict = num_attention_heads __a : Tuple = intermediate_size __a : Tuple = hidden_act __a : Optional[Any] = hidden_dropout_prob __a : Dict = attention_probs_dropout_prob __a : Any = max_position_embeddings __a : str = type_vocab_size __a : Any = type_sequence_label_size __a : Any = initializer_range __a : Optional[Any] = num_labels __a : List[Any] = num_choices __a : Any = scope def __lowerCamelCase ( self ): '''simple docstring''' return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : List[str] = None if self.use_input_mask: __a : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __a : Dict = None __a : List[Any] = None __a : Optional[int] = None if self.use_labels: __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __a : Tuple = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): '''simple docstring''' return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Dict = MPNetModel(config=A__ ) model.to(A__ ) model.eval() __a : Tuple = model(A__ , A__ ) __a : Optional[Any] = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[str] = MPNetForQuestionAnswering(config=A__ ) model.to(A__ ) model.eval() __a : int = model( A__ , attention_mask=A__ , start_positions=A__ , end_positions=A__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[str] = self.num_labels __a : List[Any] = MPNetForSequenceClassification(A__ ) model.to(A__ ) model.eval() __a : str = model(A__ , attention_mask=A__ , labels=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = self.num_choices __a : Optional[Any] = MPNetForMultipleChoice(config=A__ ) model.to(A__ ) model.eval() __a : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : List[Any] = model( A__ , attention_mask=A__ , labels=A__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[str] = self.num_labels __a : str = MPNetForTokenClassification(config=A__ ) model.to(A__ ) model.eval() __a : List[str] = model(A__ , attention_mask=A__ , labels=A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = self.prepare_config_and_inputs() ((__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Union[str, Any] = config_and_inputs __a : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( __a , __a , unittest.TestCase ): lowercase__ = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": MPNetModel, "fill-mask": MPNetForMaskedLM, "question-answering": MPNetForQuestionAnswering, "text-classification": MPNetForSequenceClassification, "token-classification": MPNetForTokenClassification, "zero-shot": MPNetForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = True def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = MPNetModelTester(self ) __a : str = ConfigTester(self , config_class=A__ , hidden_size=37 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*A__ ) def __lowerCamelCase ( self ): '''simple docstring''' __a : 
Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*A__ ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*A__ ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*A__ ) def __lowerCamelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*A__ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = MPNetModel.from_pretrained("""microsoft/mpnet-base""" ) __a : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __a : Optional[Any] = model(A__ )[0] __a : Tuple = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , A__ ) __a : str = torch.tensor( [[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=1E-4 ) )
700
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
    lowercase__ = 42
    lowercase__ = 42

    def __init__( self , __UpperCamelCase , __UpperCamelCase ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )

    @torch.no_grad()
    def __call__( self , __UpperCamelCase = 1 , __UpperCamelCase = 50 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , **__UpperCamelCase , ):
        '''simple docstring'''
        __a : int = self.unet.config.sample_size
        __a : Optional[int] = (batch_size, 3, img_size, img_size)
        __a : Union[str, Any] = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        __a : Dict = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(__UpperCamelCase )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            __a : Dict = self.scheduler.schedule[t]
            __a : Any = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            __a , __a : Tuple = self.scheduler.add_noise_to_input(__UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            __a : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            __a : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                __a : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                __a : Tuple = self.scheduler.step_correct(
                    __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , step_output.prev_sample , step_output["""derivative"""] , )
            __a : Tuple = step_output.prev_sample

        __a : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 )
        __a : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            __a : List[Any] = self.numpy_to_pil(__UpperCamelCase )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=__UpperCamelCase )
697
0
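A minimal usage sketch for the Karras-Ve pipeline in the sample above (named KarrasVePipeline in upstream diffusers). The checkpoint id, seed, and output path are illustrative assumptions, not part of the original sample:

# Hypothetical usage of a Karras-Ve pipeline like the one above; assumes the
# upstream diffusers UNet2DModel / KarrasVeScheduler / KarrasVePipeline APIs.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")  # illustrative checkpoint
scheduler = KarrasVeScheduler()
pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)

# Sampling mirrors the __call__ signature in the sample: batch_size, steps, generator.
image = pipe(batch_size=1, num_inference_steps=50, generator=torch.manual_seed(0)).images[0]
image.save("sample.png")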
'''simple docstring'''
import logging
import os
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

from tqdm import auto as tqdm_lib


log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    env_level_str = os.getenv("""DATASETS_VERBOSITY""", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ", ".join(log_levels.keys() ) }"""
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(""".""")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    '''simple docstring'''

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, attr):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        '''simple docstring'''
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
701
'''simple docstring'''

def check_bouncy(n: int) -> bool:
    if not isinstance(n, int):
        raise ValueError("""check_bouncy() accepts only integer arguments""")
    str_n = str(n)
    sorted_str_n = """""".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 9_9) -> int:
    if not 0 < percent < 1_0_0:
        raise ValueError("""solution() only accepts values from 0 to 100""")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 1_0_0 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f'''{solution(99)}''')
697
0
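A few quick checks of the bouncy-number helpers from the sample above (after the name fixes); the expected value 538 is the known Project Euler 112 answer for the 50% threshold:

# Doctest-style sanity checks for check_bouncy / solution defined above.
assert check_bouncy(1_5_1) is True       # 151 is neither increasing nor decreasing
assert check_bouncy(1_3_4_4_6_8) is False  # digits never decrease -> not bouncy
assert check_bouncy(6_6_4_2_0) is False    # digits never increase -> not bouncy
print(solution(50))  # first n where at least 50% of 1..n are bouncy: 538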
'''simple docstring''' import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class SCREAMING_SNAKE_CASE__ : lowercase__ = None def __lowerCamelCase ( self ): '''simple docstring''' __a : str = self.feature_extraction_class(**self.feat_extract_dict ) __a : Any = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __a : Union[str, Any] = os.path.join(__UpperCamelCase , """feat_extract.json""" ) feat_extract_first.to_json_file(__UpperCamelCase ) __a : List[Any] = self.feature_extraction_class.from_json_file(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __a : Union[str, Any] = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) __a : Dict = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.feature_extraction_class() self.assertIsNotNone(__UpperCamelCase )
702
'''simple docstring'''
import argparse

import torch

from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path) -> None:
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, """w""", encoding="""utf-8""") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
697
0
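A sketch of calling the converter above directly from Python instead of the CLI; all paths are placeholders, not real checkpoints:

# Hypothetical direct call to the converter defined above (paths are placeholders).
convert_gpta_checkpoint_to_pytorch(
    gpta_checkpoint_path="/path/to/tf_checkpoint",
    gpta_config_file="",  # empty string -> the script falls back to a default GPT2Config
    pytorch_dump_folder_path="/tmp/gpt2-pytorch",
)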
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __SCREAMING_SNAKE_CASE : Union[str, Any] = { 'configuration_bridgetower': [ 'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BridgeTowerConfig', 'BridgeTowerTextConfig', 'BridgeTowerVisionConfig', ], 'processing_bridgetower': ['BridgeTowerProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Dict = ['BridgeTowerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Any = [ 'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST', 'BridgeTowerForContrastiveLearning', 'BridgeTowerForImageAndTextRetrieval', 'BridgeTowerForMaskedLM', 'BridgeTowerModel', 'BridgeTowerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
703
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE__ : @staticmethod def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 ) self.assertGreater(len(__UpperCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( __UpperCamelCase , { """score""": ANY(__UpperCamelCase ), """label""": ANY(__UpperCamelCase ), """box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )}, } , ) import datasets __a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) __a : Tuple = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] __a : Any = object_detector(__UpperCamelCase , threshold=0.0 ) self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) ) for outputs in batch_outputs: self.assertGreater(len(__UpperCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( __UpperCamelCase , { """score""": ANY(__UpperCamelCase ), """label""": ANY(__UpperCamelCase ), """box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @require_torch def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3""" __a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase ) __a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase ) __a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase ) __a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ] , ) __a : Union[str, Any] = object_detector( [ 
"""http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], ] , ) @require_torch @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : str = """facebook/detr-resnet-50""" __a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase ) __a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase ) __a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase ) __a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) __a : Optional[Any] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def __lowerCamelCase 
( self ): '''simple docstring''' __a : int = """facebook/detr-resnet-50""" __a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase ) __a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) __a : List[str] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = 0.9_9_8_5 __a : Union[str, Any] = """facebook/detr-resnet-50""" __a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase ) __a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) @require_torch @require_pytesseract @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : str = """Narsil/layoutlmv3-finetuned-funsd""" __a : List[Any] = 0.9_9_9_3 __a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase ) __a : List[str] = 
object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, {"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, ] , )
697
0
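The slow test above boils down to a few lines of the real pipeline API; a condensed sketch of the same check:

# Condensed version of what the object-detection test above exercises.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
preds = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
for p in preds:
    print(p["label"], round(p["score"], 4), p["box"])  # e.g. cat 0.9988 {'xmin': 13, ...}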
'''simple docstring'''

def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"""(""", """[""", """{"""})
    closed_brackets = set({""")""", """]""", """}"""})
    open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("""Enter sequence of brackets: """)
    if is_balanced(s):
        print(s, """is balanced""")
    else:
        print(s, """is not balanced""")


if __name__ == "__main__":
    main()
704
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : List[str] = { 'configuration_blenderbot_small': [ 'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotSmallConfig', 'BlenderbotSmallOnnxConfig', ], 'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : List[str] = [ 'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[int] = [ 'TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[Any] = [ 'FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
697
0
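Quick checks for the bracket matcher from the sample above (after the name fixes):

# Sanity checks for is_balanced defined above.
assert is_balanced("{[()]}") is True
assert is_balanced("([)]") is False  # interleaved pairs are rejected via the stack pop
assert is_balanced("((") is False    # unmatched opener leaves the stack non-empty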
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def format_time(t) -> str:
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
    return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""


def html_progress_bar(value, total, prefix, label, width=3_0_0) -> str:
    # docstyle-ignore
    return f"""\n    <div>\n      {prefix}\n      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n      {label}\n    </div>\n    """


def text_to_html_table(items) -> str:
    html_code = '''<table border="1" class="dataframe">\n'''
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"""    <th>{i}</th>\n"""
    html_code += "  </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"""{elt:.6f}""" if isinstance(elt, float) else str(elt)
            html_code += f"""      <td>{elt}</td>\n"""
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(self, total, prefix=None, leave=True, parent=None, width=300):
        '''simple docstring'''
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value, force_update=False, comment=None):
        '''simple docstring'''
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value: __a : Optional[int] = self.elapsed_time / (value - self.start_value) else: __a : Optional[int] = None if value >= self.total: __a : List[str] = self.total __a : List[str] = None if not self.leave: self.close() elif self.average_time_per_item is not None: __a : List[str] = self.average_time_per_item * (self.total - value) self.update_bar(__snake_case ) __a : Optional[Any] = value __a : Tuple = current_time if self.average_time_per_item is None: __a : Optional[int] = 1 else: __a : Union[str, Any] = max(int(self.update_every / self.average_time_per_item ) , 1 ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=None ): '''simple docstring''' __a : List[Any] = ''' ''' * (len(str(self.total ) ) - len(str(__snake_case ) )) + str(__snake_case ) if self.elapsed_time is None: __a : str = f"""[{spaced_value}/{self.total} : < :""" elif self.predicted_remaining is None: __a : Optional[Any] = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}""" else: __a : str = ( f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <""" f""" {format_time(self.predicted_remaining )}""" ) self.label += f""", {1/self.average_time_per_item:.2f} it/s""" self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]""" self.display() def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: __a : Union[str, Any] = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case ) else: self.output.update(disp.HTML(self.html_code ) ) def __lowerCamelCase ( self ): '''simple docstring''' if self.parent is None and self.output is not None: self.output.update(disp.HTML("""""" ) ) class SCREAMING_SNAKE_CASE__ ( __SCREAMING_SNAKE_CASE ): def __init__( self , __UpperCamelCase , __UpperCamelCase=None ): '''simple docstring''' super().__init__(__snake_case ) __a : List[Any] = None if column_names is None else [column_names] __a : Optional[int] = None def __lowerCamelCase ( self ): '''simple docstring''' __a : int = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: __a : List[Any] = disp.display(disp.HTML(self.html_code ) , display_id=__snake_case ) else: self.output.update(disp.HTML(self.html_code ) ) def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' if self.inner_table is None: __a : Union[str, Any] = [list(values.keys() ), list(values.values() )] else: __a : Optional[int] = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(__snake_case ) __a : List[Any] = columns self.inner_table.append([values[c] for c in columns] ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=300 ): '''simple docstring''' __a : Any = NotebookProgressBar(__snake_case , prefix=__snake_case , parent=self , width=__snake_case ) return self.child_bar def __lowerCamelCase ( self ): '''simple docstring''' __a : int = None self.display() class SCREAMING_SNAKE_CASE__ ( __SCREAMING_SNAKE_CASE ): def __init__( 
self ): '''simple docstring''' __a : Optional[Any] = None __a : int = None __a : Optional[int] = False def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' __a : int = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step''' __a : List[Any] = 0 __a : List[str] = 0 __a : Tuple = [self.first_column] + ['''Training Loss'''] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append("""Validation Loss""" ) __a : Optional[Any] = NotebookTrainingTracker(state.max_steps , __snake_case ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' __a : List[Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}""" self.training_tracker.update( state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , ) __a : Optional[int] = False def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase ): '''simple docstring''' if not has_length(__snake_case ): return if self.prediction_bar is None: if self.training_tracker is not None: __a : List[str] = self.training_tracker.add_child(len(__snake_case ) ) else: __a : str = NotebookProgressBar(len(__snake_case ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' if self.prediction_bar is not None: self.prediction_bar.close() __a : List[str] = None def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase ): '''simple docstring''' if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: __a : Tuple = {'''Training Loss''': logs['''loss''']} # First column is necessarily Step sine we're not in epoch eval strategy __a : Dict = state.global_step self.training_tracker.write_line(__snake_case ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase ): '''simple docstring''' if self.training_tracker is not None: __a : Optional[Any] = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''} for log in reversed(state.log_history ): if "loss" in log: __a : Any = log['''loss'''] break if self.first_column == "Epoch": __a : Tuple = int(state.epoch ) else: __a : Optional[int] = state.global_step __a : Tuple = '''eval''' for k in metrics: if k.endswith("""_loss""" ): __a : Union[str, Any] = re.sub(r"""\_loss$""" , """""" , __snake_case ) __a : List[Any] = metrics.pop("""total_flos""" , __snake_case ) __a : Union[str, Any] = metrics.pop("""epoch""" , __snake_case ) __a : List[Any] = metrics.pop(f"""{metric_key_prefix}_runtime""" , __snake_case ) __a : Dict = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , __snake_case ) __a : List[Any] = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , __snake_case ) __a : List[str] = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , __snake_case ) for k, v in metrics.items(): if k == f"""{metric_key_prefix}_loss""": __a : List[str] = v else: __a : Any = k.split("""_""" ) __a : Optional[Any] = ''' '''.join([part.capitalize() for part in splits[1:]] ) __a : Union[str, Any] = v 
self.training_tracker.write_line(__snake_case ) self.training_tracker.remove_child() __a : Dict = None # Evaluation takes a long time so we should force the next update. __a : List[Any] = True def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' self.training_tracker.update( state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=__snake_case ) __a : Any = None
705
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Any = params __a : Optional[Any] = np.array(__UpperCamelCase ) __a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __UpperCamelCase ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self ): '''simple docstring''' return len(self.lengths ) def __lowerCamelCase ( self ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = self.params.max_model_input_size __a : Union[str, Any] = self.lengths > max_len logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" ) def divide_chunks(__UpperCamelCase , __UpperCamelCase ): return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )] __a : int = [] __a : Union[str, Any] = [] if self.params.mlm: __a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: __a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __a : Any = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase ) if sub_s[-1] != sep_id: __a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase ) assert len(__UpperCamelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__UpperCamelCase ) new_tok_ids.extend(__UpperCamelCase ) new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] ) __a : Dict = np.array(__UpperCamelCase ) __a : Tuple = np.array(__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = len(self ) __a : List[str] = self.lengths > 11 __a : int = self.token_ids[indices] __a : Union[str, Any] = self.lengths[indices] __a : Any = len(self ) logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def __lowerCamelCase ( self ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: __a : List[str] = self.params.special_tok_ids["""unk_token"""] __a : str = len(self ) __a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __a : Optional[Any] = (unk_occs / self.lengths) < 0.5 __a : List[str] = self.token_ids[indices] __a : Optional[int] = self.lengths[indices] __a : Any = len(self ) logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def __lowerCamelCase ( self ): '''simple docstring''' if not self.params.is_master: return logger.info(f"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = 
self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        '''simple docstring'''
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["""pad_token"""]
        else:
            pad_idx = self.params.special_tok_ids["""unk_token"""]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
697
0
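A sketch of how a padding collator like batch_sequences above is typically wired into a DataLoader; the `dataset` instance is an assumption (an LmSeqsDataset-like object from the sample), not defined here:

# Hypothetical wiring of the collator from the sample above (sketch).
from torch.utils.data import DataLoader

# `dataset` is assumed to be an LmSeqsDataset-like instance whose __getitem__
# returns (token_ids, length), matching batch_sequences' expectations.
loader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=dataset.batch_sequences)
for token_ids, lengths in loader:
    print(token_ids.shape, lengths.shape)  # (bs, max_seq_len_), (bs,)
    break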
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # TODO Update this __SCREAMING_SNAKE_CASE : int = { """facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""", # See all ESM models at https://huggingface.co/models?filter=esm } class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = "esm" def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=768 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3072 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=1026 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-12 , __UpperCamelCase="absolute" , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ): '''simple docstring''' super().__init__(pad_token_id=__UpperCamelCase , mask_token_id=__UpperCamelCase , **__UpperCamelCase ) __a : Dict = vocab_size __a : Any = hidden_size __a : Tuple = num_hidden_layers __a : List[str] = num_attention_heads __a : Tuple = intermediate_size __a : Union[str, Any] = hidden_dropout_prob __a : int = attention_probs_dropout_prob __a : Optional[int] = max_position_embeddings __a : str = initializer_range __a : List[Any] = layer_norm_eps __a : Union[str, Any] = position_embedding_type __a : Tuple = use_cache __a : List[str] = emb_layer_norm_before __a : Union[str, Any] = token_dropout __a : Any = is_folding_model if is_folding_model: if esmfold_config is None: logger.info("""No esmfold_config supplied for folding model, using default values.""" ) __a : Tuple = EsmFoldConfig() elif isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Tuple = EsmFoldConfig(**__UpperCamelCase ) __a : List[str] = esmfold_config if vocab_list is None: logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" ) __a : Union[str, Any] = get_default_vocab_list() else: __a : Optional[Any] = vocab_list else: __a : Optional[Any] = None __a : List[Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , __UpperCamelCase ): raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = super().to_dict() if isinstance(self.esmfold_config , __UpperCamelCase ): __a : List[Any] = self.esmfold_config.to_dict() return output @dataclass class SCREAMING_SNAKE_CASE__ : lowercase__ = None lowercase__ = True lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = 0 lowercase__ = True lowercase__ = False lowercase__ = 1_28 lowercase__ = None def __lowerCamelCase ( self ): '''simple docstring''' if self.trunk is None: __a : Any = TrunkConfig() elif isinstance(self.trunk , __UpperCamelCase ): __a : Tuple = TrunkConfig(**self.trunk ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = asdict(self ) __a : Optional[int] = self.trunk.to_dict() return output @dataclass class SCREAMING_SNAKE_CASE__ : lowercase__ = 48 lowercase__ = 10_24 lowercase__ = 1_28 lowercase__ = 32 lowercase__ = 32 lowercase__ = 32 lowercase__ = 0 lowercase__ = 0 lowercase__ = False lowercase__ = 4 lowercase__ = 1_28 lowercase__ = None def __lowerCamelCase ( self ): '''simple docstring''' if 
self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                f""" {self.sequence_state_dim} and {self.sequence_head_width}."""
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}."""
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"""
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."""
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"""
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."""
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")
        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""")

    def to_dict(self):
        '''simple docstring'''
        output = asdict(self)
        output["""structure_module"""] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 3_84
    pairwise_dim: int = 1_28
    ipa_dim: int = 16
    resnet_dim: int = 1_28
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    no_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1E-8
    inf: float = 1E5

    def to_dict(self):
        '''simple docstring'''
        return asdict(self)


def get_default_vocab_list() -> tuple:
    return (
        "<cls>", "<pad>", "<eos>", "<unk>",
        "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N",
        "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O",
        ".", "-", "<null_1>", "<mask>",
    )
706
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = "" lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' super().__init__(self , **__UpperCamelCase ) __a : int = repo_info __a : int = token __a : Any = None def __lowerCamelCase ( self ): '''simple docstring''' if self.dir_cache is None: __a : Union[str, Any] = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __a : List[str] = { """name""": hf_file.rfilename, """size""": None, """type""": """file""", } self.dir_cache.update( { str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ): '''simple docstring''' if not isinstance(self.repo_info , __UpperCamelCase ): raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" ) __a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha ) return fsspec.open( __UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open() def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' self._get_dirs() __a : str = self._strip_protocol(__UpperCamelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ): '''simple docstring''' self._get_dirs() __a : int = PurePosixPath(path.strip("""/""" ) ) __a : List[str] = {} for p, f in self.dir_cache.items(): __a : str = PurePosixPath(p.strip("""/""" ) ) __a : Optional[int] = p.parent if root == path: __a : List[str] = f __a : str = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
697
0
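A sketch illustrating the TrunkConfig divisibility constraint repaired above. Note the hedge: in upstream transformers this check runs automatically via TrunkConfig.__post_init__, whereas in this sample the validation method would have to be invoked explicitly:

# Illustrates the head-width divisibility check (upstream transformers API).
from transformers.models.esm.configuration_esm import TrunkConfig

TrunkConfig(sequence_state_dim=1024, sequence_head_width=32)  # 1024 = 32 heads * 32 -> ok
try:
    TrunkConfig(sequence_state_dim=1000, sequence_head_width=32)  # 1000 % 32 != 0
except ValueError as err:
    print(err)  # `sequence_state_dim` should be a round multiple of `sequence_head_width` ...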
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ): lowercase__ = MgpstrTokenizer lowercase__ = False lowercase__ = {} lowercase__ = False def __lowerCamelCase ( self ): '''simple docstring''' super().setUp() # fmt: off __a : Tuple = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on __a : List[str] = dict(zip(__a , range(len(__a ) ) ) ) __a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__a ) + """\n""" ) def __lowerCamelCase ( self , **__UpperCamelCase ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__a ) def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' __a : Tuple = "tester" __a : Optional[int] = "tester" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.get_tokenizers(do_lower_case=__a ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __a : Tuple = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"""cls_token""": special_token} ) __a : List[str] = tokenizer.encode([special_token] , add_special_tokens=__a ) self.assertEqual(len(__a ) , 1 ) __a : str = tokenizer.decode(__a , skip_special_tokens=__a ) self.assertTrue(special_token not in decoded ) def __lowerCamelCase ( self ): '''simple docstring''' __a : int = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __a : Dict = self.get_input_output_texts(__a ) __a : Tuple = tokenizer.tokenize(__a ) __a : Dict = tokenizer.convert_tokens_to_ids(__a ) __a : str = tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) __a : Dict = tokenizer.convert_ids_to_tokens(__a ) self.assertNotEqual(len(__a ) , 0 ) __a : Dict = tokenizer.decode(__a ) self.assertIsInstance(__a , __a ) self.assertEqual(text_a.replace(""" """ , """""" ) , __a ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass
707
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ): '''simple docstring''' __a : List[str] = parent __a : Tuple = batch_size __a : str = image_size __a : int = patch_size __a : Dict = num_channels __a : int = is_training __a : Dict = use_labels __a : Union[str, Any] = hidden_size __a : Dict = num_hidden_layers __a : Dict = backbone_out_indices __a : Optional[int] = num_attention_heads __a : List[str] = intermediate_size __a : Optional[Any] = hidden_act __a : Dict = hidden_dropout_prob __a : Tuple = attention_probs_dropout_prob __a : Any = initializer_range __a : Any = num_labels __a : Optional[Any] = backbone_featmap_shape __a : List[Any] = scope __a : List[str] = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) __a : Union[str, Any] = (image_size // patch_size) ** 2 __a : List[str] = num_patches + 1 def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Union[str, Any] = None if self.use_labels: __a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __a : Tuple = self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [96, 192, 384, 768], """num_groups""": 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , ) def __lowerCamelCase ( self , 
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = DPTModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : List[str] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[str] = self.num_labels __a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : Tuple = model(__UpperCamelCase ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Dict = self.num_labels __a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : str = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.prepare_config_and_inputs() __a , __a , __a : Tuple = config_and_inputs __a : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () lowercase__ = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = DPTModelTester(self ) __a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""DPT does not use inputs_embeds""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : str = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __a : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Any = model_class(__UpperCamelCase ) __a : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : int = [*signature.parameters.keys()] __a : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase ) def 
__lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() __a : List[Any] = True if model_class in get_values(__UpperCamelCase ): continue __a : str = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.train() __a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) __a : List[Any] = model(**__UpperCamelCase ).loss loss.backward() def __lowerCamelCase ( self ): '''simple docstring''' for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __a : Any = False __a : Dict = True if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing: continue __a : Any = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.gradient_checkpointing_enable() model.train() __a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) __a : Dict = model(**__UpperCamelCase ).loss loss.backward() def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common() __a : Any = _config_zero_init(__UpperCamelCase ) for model_class in self.all_model_classes: __a : Any = model_class(config=__UpperCamelCase ) # Skip the check for the backbone __a : Optional[Any] = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": __a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: __a : int = DPTModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : int = self.model_tester.prepare_config_and_inputs_for_common() __a : Optional[int] = """add""" with self.assertRaises(__UpperCamelCase ): __a : int = DPTForDepthEstimation(__UpperCamelCase ) def _snake_case ( ) -> Any: __a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" ) __a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase ) __a : Union[str, Any] = prepare_img() __a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): __a : Optional[Any] = 
model(**__UpperCamelCase ) __a : int = outputs.predicted_depth # verify the predicted depth __a : Any = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , __UpperCamelCase ) __a : int = torch.tensor( [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) )
697
0
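The DPT tester above wires image_size=32 and patch_size=16 into a ViT-style sequence length; a quick standalone check of that formula (plain Python, no transformers needed; the values are the tester's own defaults):

# seq_length = num_patches + 1 for the [CLS] token, as computed in the tester above
image_size, patch_size = 32, 16
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 1
print(num_patches, seq_length)  # 4 5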
'''simple docstring''' __SCREAMING_SNAKE_CASE : Optional[int] = 8.314_462 # Unit - J mol-1 K-1 def _snake_case ( lowercase , lowercase , lowercase ) -> float: if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def _snake_case ( lowercase , lowercase , lowercase ) -> float: if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("""Invalid inputs. Enter positive value.""" ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
708
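A hand-checkable instance of the PV = nRT rearrangement the ideal-gas snippet above implements. This is a standalone sketch (the dump collapses both helper names to _snake_case, so the functions are re-derived inline rather than imported):

# 1 mol at 300 K in 0.025 m^3: P = nRT / V = 1 * 8.314462 * 300 / 0.025
UNIVERSAL_GAS_CONSTANT = 8.314462  # J mol^-1 K^-1, as defined in the snippet
pressure = 1.0 * 300.0 * UNIVERSAL_GAS_CONSTANT / 0.025
print(round(pressure, 1))  # 99773.5 Pa, roughly atmospheric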
'''simple docstring''' import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) __a : Optional[int] = Vector() def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Vector([1, 2, 3, 4] ) self.assertEqual(len(__UpperCamelCase ) , 4 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = Vector([1, 2] ) __a : List[str] = Vector([1, 2, 3, 4, 5] ) __a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) __a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) __a : Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = Vector([1, 2, 3] ) __a : Any = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Vector([1, 2, 3] ) __a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product __a : Union[str, Any] = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" ) self.assertEqual((a * b) , 0 ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) __a : Optional[int] = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : int = Vector([1, 0, 0, 0, 0, 0] ) __a : Any = x.copy() self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): 
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) __a : List[Any] = Vector([1, 2, 3] ) self.assertEqual("""(14,32,50)""" , str(a * x ) ) self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual( """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
697
0
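The axpy test above expects axpy(2, (1,2,3), (1,0,1)) to print "(3,4,7)"; a minimal standalone sketch of that component-wise operation (illustrative lists, not the library's Vector type):

def axpy(a, x, y):
    # a*x + y, component-wise
    assert len(x) == len(y)
    return [a * xi + yi for xi, yi in zip(x, y)]

print(axpy(2, [1, 2, 3], [1, 0, 1]))  # [3, 4, 7], matching the test expectation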
'''simple docstring''' import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __SCREAMING_SNAKE_CASE : Optional[int] = datasets.logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : int = """\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } """ __SCREAMING_SNAKE_CASE : List[str] = """\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project's README at https://github.com/google-research/bleurt#readme for more information. """ __SCREAMING_SNAKE_CASE : Optional[Any] = """ BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: 'scores': List of scores. Examples: >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> bleurt = datasets.load_metric(\"bleurt\") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results[\"scores\"]]) [1.03, 1.04] """ __SCREAMING_SNAKE_CASE : str = { """bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""", """bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""", """bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""", """bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""", """bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""", """bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""", """BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""", """BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""", """BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""", """BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def __lowerCamelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , ) def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' if self.config_name == "default": logger.warning( """Using default BLEURT-Base checkpoint for sequence maximum length 128. 
""" """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" ) __a : Any = """bleurt-base-128""" if self.config_name.lower() in CHECKPOINT_URLS: __a : List[Any] = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: __a : Optional[int] = self.config_name.upper() else: raise KeyError( f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" ) # download the model checkpoint specified by self.config_name and set up the scorer __a : Optional[int] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) __a : List[Any] = score.BleurtScorer(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Tuple = self.scorer.score(references=_lowerCAmelCase , candidates=_lowerCAmelCase ) return {"scores": scores}
709
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __SCREAMING_SNAKE_CASE : List[str] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) __SCREAMING_SNAKE_CASE : Optional[Any] = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) __SCREAMING_SNAKE_CASE : Tuple = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) __SCREAMING_SNAKE_CASE : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) __SCREAMING_SNAKE_CASE : Optional[int] = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) __SCREAMING_SNAKE_CASE : int = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH 
JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) __SCREAMING_SNAKE_CASE : int = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _snake_case ( ) -> List[str]: __a , __a : List[Any] = randrange(len(lowercase ) ), randrange(len(lowercase ) ) __a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] __a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _snake_case ( lowercase = 1_0_0 ) -> Any: return (generate_random_hand() for _ in range(lowercase )) @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> int: assert PokerHand(lowercase )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Any: assert PokerHand(lowercase )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , lowercase ) def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]: __a : Union[str, Any] = PokerHand(lowercase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Optional[int]: assert PokerHand(lowercase )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Union[str, Any]: assert PokerHand(lowercase )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , lowercase ) def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]: assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def _snake_case ( lowercase , lowercase , lowercase ) -> int: assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected def _snake_case ( ) -> Union[str, Any]: __a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS] __a : Optional[int] = poker_hands.copy() shuffle(lowercase ) __a : List[str] = chain(sorted(lowercase ) ) for index, hand in enumerate(lowercase ): assert hand == poker_hands[index] def _snake_case ( ) -> List[str]: # Test that five high straights are compared correctly. __a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=lowercase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _snake_case ( ) -> List[str]: # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
__a : Dict = PokerHand("""2C 4S AS 3D 5C""" ) __a : Dict = True __a : Optional[int] = [5, 4, 3, 2, 1_4] for _ in range(1_0 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _snake_case ( ) -> Dict: # Problem number 54 from Project Euler # Testing from poker_hands.txt file __a : Tuple = 0 __a : int = os.path.abspath(os.path.dirname(lowercase ) ) __a : Union[str, Any] = os.path.join(lowercase , """poker_hands.txt""" ) with open(lowercase ) as file_hand: for line in file_hand: __a : Union[str, Any] = line[:1_4].strip() __a : Optional[Any] = line[1_5:].strip() __a , __a : List[str] = PokerHand(lowercase ), PokerHand(lowercase ) __a : str = player.compare_with(lowercase ) if output == "Win": answer += 1 assert answer == 3_7_6
697
0
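A compact standalone sketch of the two predicates the poker tests above probe most often (_is_flush and _is_straight). Low-ace straights (A 2 3 4 5) are handled by a separate code path in the library and are omitted here:

def is_flush(hand: str) -> bool:
    # all five cards share one suit character
    return len({card[1] for card in hand.split()}) == 1

def is_straight(hand: str) -> bool:
    order = "23456789TJQKA"
    values = sorted(order.index(card[0]) for card in hand.split())
    return values == list(range(values[0], values[0] + 5))

assert is_flush("2H 3H 4H 5H 6H") and is_straight("2H 3H 4H 5H 6H")
assert not is_flush("AS AH 2H AD AC")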
'''simple docstring''' import csv import tweepy # Twitter API credentials __SCREAMING_SNAKE_CASE : Dict = '' __SCREAMING_SNAKE_CASE : Dict = '' __SCREAMING_SNAKE_CASE : List[str] = '' __SCREAMING_SNAKE_CASE : List[str] = '' def _snake_case ( lowercase ) -> Tuple: # authorize twitter, initialize tweepy __a : str = tweepy.OAuthHandler(snake_case_ , snake_case_ ) auth.set_access_token(snake_case_ , snake_case_ ) __a : int = tweepy.API(snake_case_ ) # initialize a list to hold all the tweepy Tweets __a : Union[str, Any] = [] # make initial request for most recent tweets (200 is the maximum allowed count) __a : List[str] = api.user_timeline(screen_name=snake_case_ , count=2_0_0 ) # save most recent tweets alltweets.extend(snake_case_ ) # save the id of the oldest tweet less one __a : List[str] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(snake_case_ ) > 0: print(F"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates __a : int = api.user_timeline( screen_name=snake_case_ , count=2_0_0 , max_id=snake_case_ ) # save most recent tweets alltweets.extend(snake_case_ ) # update the id of the oldest tweet less one __a : Union[str, Any] = alltweets[-1].id - 1 print(F"""...{len(snake_case_ )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv __a : List[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(F"""new_{screen_name}_tweets.csv""" , """w""" ) as f: __a : List[Any] = csv.writer(snake_case_ ) writer.writerow(["""id""", """created_at""", """text"""] ) writer.writerows(snake_case_ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('FirePing32')
710
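The tweet dumper above pages backwards with max_id; here is the loop shape isolated into a dependency-free sketch with a toy page source standing in for the Twitter API:

def fetch_all(fetch_page):
    # fetch_page(max_id) yields items newest-first, each with an .id attribute
    items = list(fetch_page(max_id=None))
    page = items
    while page:
        oldest = page[-1].id - 1             # ask only for strictly older items
        page = list(fetch_page(max_id=oldest))
        items.extend(page)
    return items

class _Tweet:                                 # toy stand-in for a tweet object
    def __init__(self, i): self.id = i

def _page(max_id=None):
    ids = [i for i in range(10, 0, -1) if max_id is None or i <= max_id]
    return [_Tweet(i) for i in ids[:3]]       # pages of three, newest first

print([t.id for t in fetch_all(_page)])       # [10, 9, 8, ..., 1]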
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : List[Any] = [ 'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetBackbone', 'FocalNetModel', 'FocalNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
697
0
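The focalnet module above defers imports through _LazyModule; the underlying idea is PEP 562's module-level __getattr__, sketched here with a stand-in import structure (this is not transformers' actual implementation):

# Save as lazy_mod.py; `import lazy_mod; lazy_mod.sqrt(4.0)` then triggers the
# real import of `math` only on first attribute access.
import importlib

_import_structure = {"math": ["sqrt"]}        # submodule -> exported names

def __getattr__(name):
    for module, names in _import_structure.items():
        if name in names:
            return getattr(importlib.import_module(module), name)
    raise AttributeError(name)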
'''simple docstring''' def _snake_case ( lowercase ) -> List[Any]: __a : Optional[Any] = 0 __a : Optional[Any] = len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _snake_case ( lowercase ) -> Tuple: if len(__UpperCamelCase ) <= 1: return arr, 0 __a : Optional[int] = len(__UpperCamelCase ) // 2 __a : List[str] = arr[0:mid] __a : Union[str, Any] = arr[mid:] __a : List[Any] = count_inversions_recursive(__UpperCamelCase ) __a : int = count_inversions_recursive(__UpperCamelCase ) __a : Dict = _count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) __a : Any = inversion_p + inversions_q + cross_inversions return c, num_inversions def _snake_case ( lowercase , lowercase ) -> Dict: __a : str = [] __a : Tuple = 0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _snake_case ( ) -> Tuple: __a : List[str] = [1_0, 2, 1, 5, 5, 2, 1_1] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) __a : int = count_inversions_bf(__UpperCamelCase ) __a : int = count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() __a : Optional[Any] = count_inversions_bf(__UpperCamelCase ) __a : Dict = count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions __a : Union[str, Any] = [] __a : Union[str, Any] = count_inversions_bf(__UpperCamelCase ) __a : Any = count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
711
'''simple docstring''' from __future__ import annotations import bisect def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int: if hi < 0: __a : Union[str, Any] = len(lowercase ) while lo < hi: __a : List[str] = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __a : int = mid + 1 else: __a : int = mid return lo def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> int: if hi < 0: __a : Any = len(lowercase ) while lo < hi: __a : Any = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __a : List[str] = mid + 1 else: __a : Any = mid return lo def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None: sorted_collection.insert(bisect_left(lowercase , lowercase , lowercase , lowercase ) , lowercase ) def _snake_case ( lowercase , lowercase , lowercase = 0 , lowercase = -1 ) -> None: sorted_collection.insert(bisect_right(lowercase , lowercase , lowercase , lowercase ) , lowercase ) def _snake_case ( lowercase , lowercase ) -> int | None: __a : Dict = 0 __a : Any = len(lowercase ) - 1 while left <= right: __a : str = left + (right - left) // 2 __a : List[Any] = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __a : Optional[Any] = midpoint - 1 else: __a : Optional[int] = midpoint + 1 return None def _snake_case ( lowercase , lowercase ) -> int | None: __a : Optional[int] = bisect.bisect_left(lowercase , lowercase ) if index != len(lowercase ) and sorted_collection[index] == item: return index return None def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> int | None: if right < left: return None __a : Any = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowercase , lowercase , lowercase , midpoint - 1 ) else: return binary_search_by_recursion(lowercase , lowercase , midpoint + 1 , lowercase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : List[Any] = input('Enter numbers separated by comma:\n').strip() __SCREAMING_SNAKE_CASE : Optional[Any] = sorted(int(item) for item in user_input.split(',')) __SCREAMING_SNAKE_CASE : List[str] = int(input('Enter a single number to be found in the list:\n')) __SCREAMING_SNAKE_CASE : Optional[int] = binary_search(collection, target) if result is None: print(f'''{target} was not found in {collection}.''') else: print(f'''{target} was found at position {result} in {collection}.''')
697
0
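The hand-rolled bisect_left/bisect_right above mirror the standard library; a quick property check against stdlib bisect (every occurrence of item must live in data[lo:hi]):

import bisect
import random

data = sorted(random.choices(range(20), k=30))
for item in range(-1, 21):
    lo = bisect.bisect_left(data, item)
    hi = bisect.bisect_right(data, item)
    assert data[lo:hi] == [x for x in data if x == item]
print("bisect invariants hold")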
import logging from transformers.configuration_utils import PretrainedConfig __SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE__ ( lowerCAmelCase__ ): lowercase__ = "masked_bert" def __init__( self , __UpperCamelCase=3_0522 , __UpperCamelCase=768 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3072 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-12 , __UpperCamelCase=0 , __UpperCamelCase="topK" , __UpperCamelCase="constant" , __UpperCamelCase=0.0 , **__UpperCamelCase , ): '''simple docstring''' super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase ) __a : List[str] = vocab_size __a : int = hidden_size __a : List[str] = num_hidden_layers __a : Any = num_attention_heads __a : List[str] = hidden_act __a : List[Any] = intermediate_size __a : Dict = hidden_dropout_prob __a : str = attention_probs_dropout_prob __a : List[Any] = max_position_embeddings __a : Dict = type_vocab_size __a : List[str] = initializer_range __a : Optional[int] = layer_norm_eps __a : Optional[Any] = pruning_method __a : Any = mask_init __a : Optional[Any] = mask_scale
712
'''simple docstring''' from itertools import product def _snake_case ( lowercase , lowercase ) -> list[int]: __a : Optional[int] = sides_number __a : Union[str, Any] = max_face_number * dice_number __a : Optional[Any] = [0] * (max_total + 1) __a : Dict = 1 __a : str = range(lowercase , max_face_number + 1 ) for dice_numbers in product(lowercase , repeat=lowercase ): __a : int = sum(lowercase ) totals_frequencies[total] += 1 return totals_frequencies def _snake_case ( ) -> float: __a : Tuple = total_frequency_distribution( sides_number=4 , dice_number=9 ) __a : Union[str, Any] = total_frequency_distribution( sides_number=6 , dice_number=6 ) __a : str = 0 __a : Dict = 9 __a : str = 4 * 9 __a : Any = 6 for peter_total in range(lowercase , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) __a : str = (4**9) * (6**6) __a : List[Any] = peter_wins_count / total_games_number __a : List[Any] = round(lowercase , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f'''{solution() = }''')
697
0
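The dice solution above builds totals_frequencies with itertools.product; the same idea on dice small enough to verify by hand:

from itertools import product

def totals(sides: int, dice: int) -> dict:
    freq = {}
    for roll in product(range(1, sides + 1), repeat=dice):
        freq[sum(roll)] = freq.get(sum(roll), 0) + 1
    return freq

print(totals(2, 2))  # {2: 1, 3: 2, 4: 1} -> (1,1), (1,2)/(2,1), (2,2)
print(totals(4, 1))  # {1: 1, 2: 1, 3: 1, 4: 1}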
'''simple docstring''' from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __SCREAMING_SNAKE_CASE : List[Any] = { "vocab_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json" }, "merges_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt" }, } __SCREAMING_SNAKE_CASE : str = {"allegro/herbert-base-cased": 514} __SCREAMING_SNAKE_CASE : List[str] = {} class SCREAMING_SNAKE_CASE__ ( _A ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_INIT_CONFIGURATION lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = HerbertTokenizer def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase="</s>" , **__UpperCamelCase , ): '''simple docstring''' super().__init__( __UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , sep_token=__UpperCamelCase , **__UpperCamelCase , ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' __a : Optional[int] = [self.cls_token_id] __a : Any = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCamelCase )) + [1] return [1] + ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1] def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' __a : Any = [self.sep_token_id] __a : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' __a : List[str] = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase ) return tuple(__UpperCamelCase )
713
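The tokenizer's build_inputs_with_special_tokens above produces [CLS] A [SEP] for one sequence and [CLS] A [SEP] B [SEP] for a pair; the layout in plain lists (the token ids here are placeholders, not HerBERT's real vocabulary ids):

def build_inputs(ids_a, ids_b=None, cls_id=0, sep_id=2):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]

print(build_inputs([5, 6]))        # [0, 5, 6, 2]
print(build_inputs([5, 6], [7]))   # [0, 5, 6, 2, 7, 2]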
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE__ ( __lowercase ): lowercase__ = ['image_processor', 'tokenizer'] lowercase__ = 'CLIPImageProcessor' lowercase__ = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase ): '''simple docstring''' __a : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __A , ) __a : Optional[Any] = kwargs.pop("""feature_extractor""" ) __a : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__A , __A ) def __call__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __a : Dict = self.tokenizer(__A , return_tensors=__A , **__A ) if images is not None: __a : Union[str, Any] = self.image_processor(__A , return_tensors=__A , **__A ) if text is not None and images is not None: __a : Any = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def __lowerCamelCase ( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__A , **__A ) def __lowerCamelCase ( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' return self.tokenizer.decode(*__A , **__A ) @property def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.tokenizer.model_input_names __a : Optional[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __lowerCamelCase ( self ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , ) return self.image_processor_class @property def __lowerCamelCase ( self ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , ) return self.image_processor
714
'''simple docstring''' import numpy as np from PIL import Image def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray: __a : Any = np.array(lowercase ) if arr.shape[0] != arr.shape[1]: raise ValueError("""The input array is not a square matrix""" ) __a : Union[str, Any] = 0 __a : Dict = 0 __a : Optional[Any] = 0 __a : Tuple = 0 # compute the shape of the output matrix __a : Optional[int] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape __a : int = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix __a : Optional[Any] = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __a : Optional[Any] = 0 __a : str = 0 return updated_arr def _snake_case ( lowercase , lowercase , lowercase ) -> np.ndarray: __a : int = np.array(lowercase ) if arr.shape[0] != arr.shape[1]: raise ValueError("""The input array is not a square matrix""" ) __a : int = 0 __a : Optional[Any] = 0 __a : str = 0 __a : List[Any] = 0 # compute the shape of the output matrix __a : int = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape __a : Optional[int] = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix __a : Any = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __a : str = 0 __a : List[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='avgpooling', verbose=True) # Loading the image __SCREAMING_SNAKE_CASE : str = Image.open('path_to_image') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
697
0
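A worked 4x4 instance of the size-2, stride-2 max pooling implemented above; the output dimension follows the same (n - size) // stride + 1 formula as the snippet:

import numpy as np

arr = np.arange(16).reshape(4, 4)              # 0..15 row by row
size, stride = 2, 2
out_dim = (arr.shape[0] - size) // stride + 1  # (4 - 2) // 2 + 1 = 2
out = np.zeros((out_dim, out_dim))
for i in range(out_dim):
    for j in range(out_dim):
        window = arr[i * stride : i * stride + size, j * stride : j * stride + size]
        out[i, j] = window.max()
print(out)  # [[ 5.  7.] [13. 15.]]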
'''simple docstring''' import operator as op def _snake_case ( lowercase ) -> Union[str, Any]: __a : Optional[int] = [] __a : List[str] = lambda lowercase , lowercase : int(x / y ) # noqa: E731 integer division operation __a : List[Any] = { """^""": op.pow, """*""": op.mul, """/""": div, """+""": op.add, """-""": op.sub, } # operators & their respective operation # print table header print("""Symbol""".center(8 ) , """Action""".center(1_2 ) , """Stack""" , sep=""" | """ ) print("""-""" * (3_0 + len(UpperCamelCase__ )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(UpperCamelCase__ ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(1_2 ) , """,""".join(UpperCamelCase__ ) , sep=""" | """ ) else: __a : Dict = stack.pop() # pop stack # output in tabular format print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(1_2 ) , """,""".join(UpperCamelCase__ ) , sep=""" | """ ) __a : Any = stack.pop() # pop stack # output in tabular format print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(1_2 ) , """,""".join(UpperCamelCase__ ) , sep=""" | """ ) stack.append( str(opr[x](int(UpperCamelCase__ ) , int(UpperCamelCase__ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(1_2 ) , """,""".join(UpperCamelCase__ ) , sep=""" | """ , ) return int(stack[0] ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[Any] = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ') print('\n\tResult = ', solve(Postfix))
715
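The postfix evaluator above interleaves the stack machine with a printed trace; stripped to the bare loop, with the same operator table (including integer division for '/'):

import operator as op

def eval_postfix(tokens):
    ops = {"^": op.pow, "*": op.mul, "/": lambda x, y: int(x / y),
           "+": op.add, "-": op.sub}
    stack = []
    for tok in tokens:
        if tok.isdigit():
            stack.append(int(tok))
        else:
            b, a = stack.pop(), stack.pop()   # right operand is popped first
            stack.append(ops[tok](a, b))
    return stack[0]

print(eval_postfix("5 6 9 * +".split()))  # 59, i.e. 5 + 6 * 9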
'''simple docstring''' import qiskit def _snake_case ( lowercase , lowercase ) -> qiskit.result.counts.Counts: __a : Any = qiskit.Aer.get_backend("""aer_simulator""" ) # Create a Quantum Circuit acting on the q register __a : str = qiskit.QuantumCircuit(lowercase , lowercase ) # Map the quantum measurement to the classical bits circuit.measure([0] , [0] ) # Execute the circuit on the simulator __a : Any = qiskit.execute(lowercase , lowercase , shots=1_0_0_0 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(lowercase ) if __name__ == "__main__": print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
697
0
'''simple docstring''' import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def _snake_case ( lowercase ) -> Union[str, Any]: assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def _snake_case ( ) -> List[Any]: assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def _snake_case ( ) -> List[Any]: __a : List[Any] = """mock-s3-bucket""" __a : str = F"""s3://{mock_bucket}""" __a : Optional[int] = extract_path_from_uri(_UpperCAmelCase ) assert dataset_path.startswith("""s3://""" ) is False __a : Union[str, Any] = """./local/path""" __a : List[str] = extract_path_from_uri(_UpperCAmelCase ) assert dataset_path == new_dataset_path def _snake_case ( lowercase ) -> Tuple: __a : List[str] = is_remote_filesystem(_UpperCAmelCase ) assert is_remote is True __a : List[str] = fsspec.filesystem("""file""" ) __a : Dict = is_remote_filesystem(_UpperCAmelCase ) assert is_remote is False @pytest.mark.parametrize("""compression_fs_class""" , _UpperCAmelCase ) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> str: __a : List[Any] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file} __a : int = input_paths[compression_fs_class.protocol] if input_path is None: __a : str = F"""for \'{compression_fs_class.protocol}\' compression protocol, """ if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(_UpperCAmelCase ) __a : Any = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCAmelCase ) assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) __a : str = os.path.basename(_UpperCAmelCase ) __a : Tuple = expected_filename[: expected_filename.rindex(""".""" )] assert fs.glob("""*""" ) == [expected_filename] with fs.open(_UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f, open(_UpperCAmelCase , encoding="""utf-8""" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] ) def _snake_case ( lowercase , lowercase , lowercase ) -> Dict: __a : Optional[int] = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path} __a : Union[str, Any] = compressed_file_paths[protocol] __a : Optional[Any] = """dataset.jsonl""" __a : List[Any] = F"""{protocol}://{member_file_path}::{compressed_file_path}""" __a , *__a : str = fsspec.get_fs_token_paths(_UpperCAmelCase ) assert fs.isfile(_UpperCAmelCase ) assert not fs.isfile("""non_existing_""" + member_file_path ) @pytest.mark.integration def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> Tuple: __a : Optional[int] = hf_api.dataset_info(_UpperCAmelCase , token=_UpperCAmelCase ) __a : List[Any] = HfFileSystem(repo_info=_UpperCAmelCase , token=_UpperCAmelCase ) assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"] assert hffs.isdir("""data""" ) assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" ) with open(_UpperCAmelCase ) as f: assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read() def _snake_case ( ) -> List[str]: __a : Optional[Any] = """bz2""" # Import module import datasets.filesystems # Overwrite 
protocol and reload register_implementation(_UpperCAmelCase , _UpperCAmelCase , clobber=_UpperCAmelCase ) with pytest.warns(_UpperCAmelCase ) as warning_info: importlib.reload(datasets.filesystems ) assert len(_UpperCAmelCase ) == 1 assert ( str(warning_info[0].message ) == F"""A filesystem protocol was already set for {protocol} and will be overwritten.""" )
716
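The first filesystem tests above pin down what extract_path_from_uri must do; the contract restated as a standalone sketch (not datasets' actual implementation):

def extract_path(uri: str) -> str:
    # remote URIs lose their protocol prefix; local paths pass through untouched
    return uri.split("://", 1)[1] if "://" in uri else uri

assert not extract_path("s3://mock-s3-bucket").startswith("s3://")
assert extract_path("./local/path") == "./local/path"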
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Any = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k', 'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v', 'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q', 'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u', 'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v', 'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out', 'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos', 'self_attn.rotary_emb': 'encoder.embed_positions', 'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm', 'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1', 'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2', 'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv', 'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm', 'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm', 'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense', 'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense', 'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm', 'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense', 'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense', 'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __SCREAMING_SNAKE_CASE : Optional[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: for attribute in key.split(""".""" ): __a : str = getattr(lowercase , lowercase ) if weight_type is not None: __a : Dict = getattr(lowercase , lowercase ).shape else: __a : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a : Any = value elif weight_type == "weight_g": __a : int = value elif weight_type == "weight_v": __a : int = value elif weight_type == "bias": __a : List[Any] = value elif weight_type == "running_mean": __a : Union[str, Any] = value elif weight_type == "running_var": __a : Tuple = value elif weight_type == "num_batches_tracked": __a : Optional[int] = value elif weight_type == "inv_freq": __a : List[str] = value else: __a : List[str] = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( lowercase , lowercase , lowercase ) -> Dict: __a : Dict = [] __a : Dict = fairseq_model.state_dict() __a : Tuple = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __a : int = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , ) __a : List[Any] = True else: for key, mapped_key in MAPPING.items(): __a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __a : str = True if "*" in mapped_key: __a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2] __a : List[Any] = mapped_key.replace("""*""" , lowercase ) if "pos_bias_u" in name: __a : Union[str, Any] = None elif "pos_bias_v" in name: __a : List[Any] = None elif "weight_g" in name: __a : List[Any] = """weight_g""" elif "weight_v" in name: __a : List[Any] = """weight_v""" elif "bias" in name: __a : Optional[int] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __a : str = """weight""" elif "running_mean" in name: __a : List[str] = """running_mean""" elif "inv_freq" in name: __a : Dict = """inv_freq""" elif "running_var" in name: __a : Union[str, Any] = """running_var""" elif "num_batches_tracked" in name: __a : int = """num_batches_tracked""" else: __a : Optional[int] = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]: __a : Optional[Any] = full_name.split("""conv_layers.""" )[-1] __a : Union[str, Any] = name.split(""".""" ) __a : Optional[Any] = int(items[0] ) __a : int = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a : Dict = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __a : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __a : Union[str, Any] = value logger.info(F"""Feat 
extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase ) @torch.no_grad() def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]: if config_path is not None: __a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" ) else: __a : Optional[int] = WavaVecaConformerConfig() if "rope" in checkpoint_path: __a : Optional[Any] = """rotary""" if is_finetuned: if dict_path: __a : List[Any] = Dictionary.load(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __a : int = target_dict.pad_index __a : List[str] = target_dict.bos_index __a : str = target_dict.eos_index __a : Dict = len(target_dict.symbols ) __a : Any = os.path.join(lowercase , """vocab.json""" ) if not os.path.isdir(lowercase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) __a : Dict = target_dict.indices # fairseq has the <pad> and <s> switched __a : Optional[Any] = 0 __a : List[Any] = 1 with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowercase , lowercase ) __a : int = WavaVecaCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , ) __a : Optional[int] = True if config.feat_extract_norm == """layer""" else False __a : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , ) __a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) __a : List[str] = WavaVecaConformerForCTC(lowercase ) else: __a : Optional[int] = WavaVecaConformerForPreTraining(lowercase ) if is_finetuned: __a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" ) __a : Tuple = fairseq.tasks.setup_task(lowercase ) __a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase ) __a : Any = model[0].eval() recursively_load_weights(lowercase , lowercase , not is_finetuned ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
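# Illustrative sketch (not part of the conversion script above): the dotted-key
# traversal it relies on. A key such as "proj.weight" is split on ".", getattr()
# walks down the module tree, and the tensor is assigned after a shape check.
# `ToyModel` is a hypothetical stand-in for the real wav2vec2-conformer model.
import torch
from torch import nn


class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 4)


model = ToyModel()
key, value = "proj.weight", torch.zeros(4, 4)
pointer = model
for attribute in key.split(".")[:-1]:
    pointer = getattr(pointer, attribute)  # model -> model.proj
param = getattr(pointer, key.split(".")[-1])
assert param.shape == value.shape  # same guard the script applies before assigning
param.data = value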
style_context_codestyle: 697
label: 0
'''simple docstring''' import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=3 , __UpperCamelCase=16 , __UpperCamelCase=[32, 64, 128] , __UpperCamelCase=[1, 2, 1] , __UpperCamelCase=[2, 2, 4] , __UpperCamelCase=2 , __UpperCamelCase=2.0 , __UpperCamelCase=True , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.1 , __UpperCamelCase="gelu" , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-5 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=10 , __UpperCamelCase=8 , __UpperCamelCase=["stage1", "stage2"] , __UpperCamelCase=[1, 2] , ): '''simple docstring''' __a : List[str] = parent __a : Dict = batch_size __a : Dict = image_size __a : List[str] = patch_size __a : int = num_channels __a : Dict = embed_dim __a : Dict = hidden_sizes __a : Union[str, Any] = depths __a : str = num_heads __a : Optional[int] = window_size __a : Dict = mlp_ratio __a : Dict = qkv_bias __a : Tuple = hidden_dropout_prob __a : int = attention_probs_dropout_prob __a : Tuple = drop_path_rate __a : Dict = hidden_act __a : Optional[int] = use_absolute_embeddings __a : Dict = patch_norm __a : Tuple = layer_norm_eps __a : List[Any] = initializer_range __a : List[str] = is_training __a : str = scope __a : List[str] = use_labels __a : Optional[Any] = type_sequence_label_size __a : Optional[Any] = encoder_stride __a : Union[str, Any] = out_features __a : List[Any] = out_indices def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Any = None if self.use_labels: __a : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : List[Any] = self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self ): '''simple docstring''' return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , 
out_features=self.out_features , out_indices=self.out_indices , ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : str = FocalNetModel(config=__a ) model.to(__a ) model.eval() __a : str = model(__a ) __a : List[str] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __a : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Dict = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() __a : int = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None __a : Optional[int] = None __a : List[str] = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() __a : List[Any] = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Dict = FocalNetForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() __a : List[str] = model(__a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __a : Tuple = 1 __a : Optional[Any] = FocalNetForMaskedImageModeling(__a ) model.to(__a ) model.eval() __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __a : Optional[Any] = model(__a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = self.type_sequence_label_size __a : int = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() __a : Any = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __a : Optional[Any] = 1 __a : Union[str, Any] = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() __a : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __a : str = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.prepare_config_and_inputs() __a : Optional[int] = config_and_inputs __a : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase__ = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) 
lowercase__ = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = FocalNetModelTester(self ) __a : List[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a ) def __lowerCamelCase ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __lowerCamelCase ( self ): '''simple docstring''' return def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__a ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: __a : Optional[Any] = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __a : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: __a : Any = model_class(__a ) __a : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : Dict = [*signature.parameters.keys()] __a : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : str = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): __a : int = model(**self._prepare_for_class(__a , __a ) ) __a : int = outputs.hidden_states __a : List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__a ) , __a ) # FocalNet has a different seq_length __a : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __a : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, 
self.model_tester.embed_dim] , ) __a : Tuple = outputs.reshaped_hidden_states self.assertEqual(len(__a ) , __a ) __a : int = reshaped_hidden_states[0].shape __a : Dict = ( reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : int = self.model_tester.prepare_config_and_inputs_for_common() __a : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: __a : Union[str, Any] = True self.check_hidden_states_output(__a , __a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : str = True self.check_hidden_states_output(__a , __a , __a , __a ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __a : Any = 3 __a : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __a : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __a : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __a : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: __a : Any = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __a : str = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Optional[Any] = FocalNetModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __a : List[str] = _config_zero_init(__a ) for model_class in self.all_model_classes: __a : str = model_class(config=__a ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def __lowerCamelCase ( self ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : str = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a ) __a : Optional[Any] = self.default_image_processor __a : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __a : Union[str, Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): __a : int = model(**__a ) # verify the logits __a : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __a : Union[str, Any] = 
torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): lowercase__ = (FocalNetBackbone,) if is_torch_available() else () lowercase__ = FocalNetConfig lowercase__ = False def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = FocalNetModelTester(self )
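# Illustrative arithmetic (derived from the tester defaults above: image_size=32,
# patch_size=2, embed_dim=16, depths=[1, 2, 1]): the expected sequence length and
# hidden size asserted by create_and_check_model.
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
assert expected_seq_len == 16  # 16x16 = 256 patches, reduced 4x per stage transition
assert expected_dim == 64      # embedding dim doubles at each of the two transitions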
code_codestyle: 717
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable


def _snake_case(fn: Callable) -> Callable:
    """Decorator that warns whenever the wrapped experimental function is called."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,  # assumed warning category
        )
        return fn(*args, **kwargs)

    return _inner_fn
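# Illustrative usage of the decorator above (`new_feature` is hypothetical): every
# call emits a warning naming the wrapped function, then delegates to it.
import warnings


@_snake_case
def new_feature(x):
    return x * 2


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert new_feature(3) == 6
assert "'new_feature' is experimental" in str(caught[0].message)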
style_context_codestyle: 697
label: 0
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Count tilings of a grey row of ``length`` units that use at least one
    coloured block of a single size (2, 3 or 4 units), summed over the sizes."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
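# Quick sanity check (illustrative): for a row of length 5 the function counts
# 7 layouts with length-2 blocks, 3 with length-3 and 2 with length-4, i.e. 12
# in total -- consistent with Project Euler problem 116's worked example.
assert solution(5) == 12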
code_codestyle: 718
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = ["input_features", "attention_mask"] def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ): '''simple docstring''' super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase ) __a : List[str] = feature_size __a : List[str] = sampling_rate __a : int = padding_value __a : Any = hop_length __a : int = win_length __a : Tuple = frame_signal_scale __a : Union[str, Any] = preemphasis_coeff __a : List[str] = mel_floor __a : Union[str, Any] = normalize_means __a : Optional[Any] = normalize_vars __a : Optional[Any] = win_function __a : Union[str, Any] = return_attention_mask __a : List[Any] = win_length * sampling_rate // 1000 __a : List[Any] = hop_length * sampling_rate // 1000 __a : Optional[Any] = optimal_fft_length(self.sample_size ) __a : Any = (self.n_fft // 2) + 1 def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' if self.win_function == "hamming_window": __a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase ) else: __a : Dict = window_function(window_length=self.sample_size , name=self.win_function ) __a : Optional[Any] = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) __a : Any = spectrogram( one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , ) return msfc_features.T def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if self.normalize_means: __a : int = x[:input_length].mean(axis=0 ) __a : str = np.subtract(__UpperCamelCase , __UpperCamelCase ) if self.normalize_vars: __a : Dict = x[:input_length].std(axis=0 ) __a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase ) if input_length < x.shape[0]: __a : Union[str, Any] = padding_value # make sure array is in float32 __a : Any = x.astype(np.floataa ) return x def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' __a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )] def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , 
__UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) __a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __a : Tuple = is_batched_numpy or ( isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ): __a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa ) elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __a : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __a : Any = [raw_speech] # extract fbank features __a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech] # convert into correct format for padding __a : Optional[Any] = BatchFeature({"""input_features""": features} ) __a : Any = self.pad( __UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) # make sure list is in array format __a : int = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] , __UpperCamelCase ): __a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features] __a : List[str] = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: __a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: __a : Optional[Any] = ( np.array(__UpperCamelCase , dtype=np.intaa ) if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) __a : int = self.normalize( padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase ) if return_tensors is not None: __a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
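# Illustrative arithmetic (using the extractor defaults above: 25 ms window,
# 10 ms hop, 16 kHz audio): how the millisecond settings become sample counts,
# with the FFT size taken as the next power of two at or above the window
# length, which is what optimal_fft_length is assumed to return.
sampling_rate, win_length_ms, hop_length_ms = 16000, 25, 10
sample_size = win_length_ms * sampling_rate // 1000    # 400 samples per frame
sample_stride = hop_length_ms * sampling_rate // 1000  # 160 samples between frames
n_fft = 1
while n_fft < sample_size:
    n_fft *= 2
n_freqs = (n_fft // 2) + 1
assert (sample_size, sample_stride, n_fft, n_freqs) == (400, 160, 512, 257)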
style_context_codestyle: 697
label: 0
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
code_codestyle: 719
'''simple docstring'''
g = 9.80665  # standard gravity in m/s^2


def _snake_case(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force via Archimedes' principle: F = fluid_density * gravity * volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
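# Illustrative usage (the density and volume values are assumptions, not from
# the sample): buoyant force F = rho * g * V for 0.5 m^3 of displaced water.
force = _snake_case(fluid_density=997, volume=0.5)
assert abs(force - 997 * 9.80665 * 0.5) < 1e-9  # roughly 4888.6 N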
style_context_codestyle: 697
label: 0
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Return the file size and (optionally) the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    return False
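# Illustrative usage sketch (the temporary file is hypothetical): recording a
# file's size and sha256 digest, the per-URL record the verification helpers
# above compare against the expected table.
import tempfile
from hashlib import sha256 as _sha256

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello")
    tmp_path = tmp.name

info = get_size_checksum_dict(tmp_path)
assert info["num_bytes"] == 5
assert info["checksum"] == _sha256(b"hello").hexdigest()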
code_codestyle: 720
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ): '''simple docstring''' __a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} __a : Dict = parent __a : Union[str, Any] = batch_size __a : Optional[int] = num_channels __a : Dict = min_resolution __a : List[Any] = max_resolution __a : int = do_resize __a : str = size __a : Optional[Any] = do_rescale __a : Optional[Any] = rescale_factor __a : str = do_normalize __a : Any = image_mean __a : Optional[Any] = image_std __a : Dict = do_pad def __lowerCamelCase ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ): '''simple docstring''' if not batched: __a : Union[str, Any] = image_inputs[0] if isinstance(__UpperCamelCase , Image.Image ): __a , __a : Tuple = image.size else: __a , __a : Tuple = image.shape[1], image.shape[2] if w < h: __a : Optional[int] = int(self.size["""shortest_edge"""] * h / w ) __a : Tuple = self.size["""shortest_edge"""] elif w > h: __a : Optional[Any] = self.size["""shortest_edge"""] __a : Any = int(self.size["""shortest_edge"""] * w / h ) else: __a : Any = self.size["""shortest_edge"""] __a : Optional[int] = self.size["""shortest_edge"""] else: __a : Any = [] for image in image_inputs: __a , __a : Any = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0] __a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ): lowercase__ = DetrImageProcessor if is_vision_available() else None def __lowerCamelCase ( self ): '''simple docstring''' __a : str = DetrImageProcessingTester(self ) @property def __lowerCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) ) self.assertTrue(hasattr(__UpperCamelCase , 
"""do_resize""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """size""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , __UpperCamelCase ) __a : List[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , Image.Image ) # Test not batched input __a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) __a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , np.ndarray ) # Test not batched input __a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values __a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , torch.Tensor ) # Test not batched input __a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( 
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: __a : Dict = json.loads(f.read() ) __a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target} # encode them __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" ) __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __a : Union[str, Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase ) __a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify area __a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) ) # verify boxes __a : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase ) __a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) ) # verify image_id __a : Union[str, Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) ) # verify is_crowd __a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) ) # verify class_labels __a : Any = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) ) # verify orig_size __a : Any = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) ) # verify size __a : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: __a : Tuple = json.loads(f.read() ) __a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} __a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" ) __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __a : List[str] = torch.Size([1, 3, 800, 1066] 
) self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase ) __a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify area __a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) ) # verify boxes __a : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase ) __a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) ) # verify image_id __a : List[str] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) ) # verify is_crowd __a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) ) # verify class_labels __a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) ) # verify masks __a : Union[str, Any] = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase ) # verify orig_size __a : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) ) # verify size __a : List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
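# Illustrative helper (a hedged re-derivation, not part of the test file): the
# aspect-preserving rule get_expected_values applies -- pin the shorter side to
# `shortest_edge` and scale the other side to keep the aspect ratio.
def expected_hw(h: int, w: int, shortest_edge: int = 18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert expected_hw(400, 200) == (36, 18)  # portrait: width pinned to 18
assert expected_hw(200, 400) == (18, 36)  # landscape: height pinned to 18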
style_context_codestyle: 697
label: 0
'''simple docstring'''
from __future__ import annotations


def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
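# Illustrative round trip (not part of the sample): encode maps 'a'..'z' to
# 1..26 via ord(ch) - 96, and decode inverts it with chr(code + 96).
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"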
code_codestyle: 721
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING) __SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() if args.tokenizer_name: __SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size __SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine' if args.fpaa: __SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine' if args.inta: __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)] __SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __SCREAMING_SNAKE_CASE : Tuple = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: __a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) __a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) __a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase ) # start time __a : Optional[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowercase ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase ) cuda.memcpy_dtoh_async(lowercase , 
lowercase , lowercase ) # Synchronize the stream and take time stream.synchronize() # end time __a : str = time.time() __a : Any = end_time - start_time __a : Optional[int] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names __SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0] __SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1] __SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length) def _snake_case ( lowercase ) -> Tuple: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace __a : Optional[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. 
This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. __a : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. __a : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. __a : Optional[Any] = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). __a : Dict = tokenized_examples.sequence_ids(lowercase ) __a : Optional[Any] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. __a : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. __a : int = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'] # Validation Feature Creation __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __SCREAMING_SNAKE_CASE : List[Any] = default_data_collator __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __SCREAMING_SNAKE_CASE : List[str] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def _snake_case ( lowercase , lowercase , lowercase , lowercase="eval" ) -> Any: # Post-processing: we match the start logits and end logits to answers in the original context. __a : List[str] = postprocess_qa_predictions( examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: __a : List[str] = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: __a : List[str] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] __a : Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowercase , label_ids=lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inference for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def _snake_case ( lowercase ) -> Optional[int]: return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize # Allocate device memory for inputs and outputs. __SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __SCREAMING_SNAKE_CASE : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : str = cuda.mem_alloc(h_outputa.nbytes) __SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __SCREAMING_SNAKE_CASE : Tuple = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') __SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0 __SCREAMING_SNAKE_CASE : str = 0 __SCREAMING_SNAKE_CASE : str = timeit.default_timer() __SCREAMING_SNAKE_CASE : Dict = None for step, batch in enumerate(eval_dataloader): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = outputs __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(start_logits) __SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __SCREAMING_SNAKE_CASE : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __SCREAMING_SNAKE_CASE : Tuple = nested_truncate(all_preds, len(eval_dataset)) __SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f}
ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) __SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds) __SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
697
0
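The `return_overflowing_tokens`/`stride` combination in `prepare_validation_features` above is what turns one long context into several overlapping features. A minimal, dependency-free sketch of that windowing idea (an illustration only, not the tokenizer's actual implementation; `max_length` and `stride` here are plain token counts):

def split_with_stride(tokens, max_length, stride):
    """Yield windows of at most max_length tokens; consecutive windows overlap by stride tokens."""
    if max_length <= stride:
        raise ValueError("max_length must be larger than stride")
    step = max_length - stride
    for start in range(0, len(tokens), step):
        yield tokens[start : start + max_length]
        if start + max_length >= len(tokens):
            break

print(list(split_with_stride(list(range(10)), max_length=4, stride=2)))
# [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]]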
'''simple docstring''' from ..utils import DummyObject, requires_backends class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): 
'''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) def _snake_case ( *lowercase , **lowercase ) -> List[Any]: requires_backends(_lowerCamelCase , ["""torch"""] ) def _snake_case ( *lowercase , **lowercase ) -> List[Any]: requires_backends(_lowerCamelCase , ["""torch"""] ) def _snake_case ( *lowercase , **lowercase ) -> Dict: requires_backends(_lowerCamelCase , ["""torch"""] ) def _snake_case ( *lowercase , **lowercase ) -> Dict: requires_backends(_lowerCamelCase , ["""torch"""] ) def _snake_case ( *lowercase , **lowercase ) -> str: requires_backends(_lowerCamelCase , ["""torch"""] ) def _snake_case ( *lowercase , **lowercase ) -> Optional[Any]: requires_backends(_lowerCamelCase , ["""torch"""] ) def _snake_case ( *lowercase , **lowercase ) -> Union[str, Any]: requires_backends(_lowerCamelCase , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' 
requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' 
requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' 
requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' 
requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' 
requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' 
requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) class SCREAMING_SNAKE_CASE__ ( metaclass=__UpperCamelCase ): lowercase__ = ["torch"] def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(self , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] ) @classmethod def __lowerCamelCase ( cls , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' requires_backends(cls , ["""torch"""] )
700
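The file above is a long run of near-identical stubs, but the pattern behind it is small. A toy sketch (standard library only; all names here are hypothetical, not the library's API) of how a metaclass can turn a class into a stub that fails with a helpful ImportError when its backend is missing:

import importlib.util

def needs_backends(obj, backends):
    name = getattr(obj, "__name__", type(obj).__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires {missing}, which are not installed.")

class DummyMeta(type):
    # Class-level attribute access (e.g. TorchOnlyStub.from_pretrained) also fails loudly.
    def __getattr__(cls, key):
        needs_backends(cls, cls._backends)

class TorchOnlyStub(metaclass=DummyMeta):
    _backends = ["some_missing_backend"]  # pretend the required backend is absent
    def __init__(self, *args, **kwargs):
        needs_backends(self, self._backends)

# Both TorchOnlyStub() and TorchOnlyStub.from_pretrained raise ImportError here.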
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = 42 lowercase__ = 42 def __init__( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' super().__init__() self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase ) @torch.no_grad() def __call__( self , __UpperCamelCase = 1 , __UpperCamelCase = 50 , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , **__UpperCamelCase , ): '''simple docstring''' __a : int = self.unet.config.sample_size __a : Optional[int] = (batch_size, 3, img_size, img_size) __a : Union[str, Any] = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) __a : Dict = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(__UpperCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper __a : Dict = self.scheduler.schedule[t] __a : Any = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat __a , __a : Tuple = self.scheduler.add_noise_to_input(__UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. __a : List[Any] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev __a : str = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. __a : Union[str, Any] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample __a : Tuple = self.scheduler.step_correct( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , step_output.prev_sample , step_output["""derivative"""] , ) __a : Tuple = step_output.prev_sample __a : Optional[Any] = (sample / 2 + 0.5).clamp(0 , 1 ) __a : Dict = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __a : List[Any] = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCamelCase )
697
0
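The `add_noise_to_input` call above is the stochastic churn step of the Karras et al. (2022) sampler: pick a temporarily increased noise level sigma_hat >= sigma, then add just enough fresh noise so the sample sits at that level. A hedged numpy sketch of that step in isolation (the churn constants below are illustrative assumptions, not the scheduler's defaults):

import numpy as np

def add_noise_to_input(sample, sigma, s_churn=80.0, s_min=0.05, s_max=50.0, num_steps=50, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    gamma = min(s_churn / num_steps, 2**0.5 - 1) if s_min <= sigma <= s_max else 0.0
    sigma_hat = sigma * (1.0 + gamma)
    # Variances add, so the extra noise has standard deviation sqrt(sigma_hat**2 - sigma**2).
    eps = rng.standard_normal(sample.shape)
    return sample + np.sqrt(sigma_hat**2 - sigma**2) * eps, sigma_hat

sample_hat, sigma_hat = add_noise_to_input(np.zeros((1, 3, 8, 8)), sigma=1.0)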
'''simple docstring''' from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase = 6 ): '''simple docstring''' __a : Tuple = None __a : Dict = None self.create_linked_list(__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' __a : str = Node() __a : List[str] = current_node __a : Tuple = current_node __a : List[str] = current_node for _ in range(1 , __UpperCamelCase ): __a : Any = Node() __a : List[str] = current_node __a : str = previous_node __a : int = current_node __a : Optional[int] = self.front __a : Dict = previous_node def __lowerCamelCase ( self ): '''simple docstring''' return ( self.front == self.rear and self.front is not None and self.front.data is None ) def __lowerCamelCase ( self ): '''simple docstring''' self.check_can_perform_operation() return self.front.data if self.front else None def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' if self.rear is None: return self.check_is_full() if not self.is_empty(): __a : str = self.rear.next if self.rear: __a : List[Any] = data def __lowerCamelCase ( self ): '''simple docstring''' self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: __a : List[Any] = self.front.data __a : int = None return data __a : List[str] = self.front __a : Dict = old_front.next __a : int = old_front.data __a : List[str] = None return data def __lowerCamelCase ( self ): '''simple docstring''' if self.is_empty(): raise Exception("""Empty Queue""" ) def __lowerCamelCase ( self ): '''simple docstring''' if self.rear and self.rear.next == self.front: raise Exception("""Full Queue""" ) class SCREAMING_SNAKE_CASE__ : def __init__( self ): '''simple docstring''' __a : int = None __a : Dict = None __a : List[str] = None if __name__ == "__main__": import doctest doctest.testmod()
701
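For contrast with the linked-list implementation above, the same fixed-capacity FIFO behaviour (raising "Full Queue" / "Empty Queue" at the limits) fits in a short array-backed ring buffer. A sketch, not a drop-in replacement:

class RingBuffer:
    """Array-backed ring buffer with fixed capacity."""
    def __init__(self, capacity: int = 6) -> None:
        self._buf = [None] * capacity
        self._head = 0  # index of the oldest element
        self._size = 0
    def is_empty(self) -> bool:
        return self._size == 0
    def enqueue(self, item) -> None:
        if self._size == len(self._buf):
            raise Exception("Full Queue")
        self._buf[(self._head + self._size) % len(self._buf)] = item
        self._size += 1
    def dequeue(self):
        if self.is_empty():
            raise Exception("Empty Queue")
        item, self._buf[self._head] = self._buf[self._head], None
        self._head = (self._head + 1) % len(self._buf)
        self._size -= 1
        return item

q = RingBuffer(3)
q.enqueue("a")
q.enqueue("b")
assert q.dequeue() == "a"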
'''simple docstring''' def _snake_case ( lowercase ) -> bool: if not isinstance(lowercase , int ): raise ValueError("""check_bouncy() accepts only integer arguments""" ) __a : str = str(lowercase ) __a : Any = """""".join(sorted(lowercase ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def _snake_case ( lowercase = 9_9 ) -> int: if not 0 < percent < 1_0_0: raise ValueError("""solution() only accepts values from 0 to 100""" ) __a : List[str] = 0 __a : Union[str, Any] = 1 while True: if check_bouncy(lowercase ): bouncy_num += 1 if (bouncy_num / num) * 1_0_0 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(f'''{solution(99)}''')
697
0
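A quick, self-contained sanity check of the bouncy-number definition used above (a number is bouncy when its digits are neither monotonically increasing nor decreasing); `is_bouncy` is a hypothetical stand-in for the obfuscated checker:

def is_bouncy(n: int) -> bool:
    digits = str(n)
    return sorted(digits) != list(digits) and sorted(digits, reverse=True) != list(digits)

assert is_bouncy(538)          # 5 > 3 < 8: neither increasing nor decreasing
assert not is_bouncy(134468)   # digits never decrease
assert not is_bouncy(66420)    # digits never increase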
'''simple docstring''' import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = logging.get_logger() # the current default level is logging.WARNING __a : Optional[int] = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(lowercase__ ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = logging.get_verbosity() __a : List[Any] = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) __a : List[Any] = "Testing 1, 2, 3" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(lowercase__ ) as cl: logger.warning(lowercase__ ) self.assertEqual(cl.out , msg + """\n""" ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(lowercase__ ) as cl: logger.warning(lowercase__ ) self.assertEqual(cl.out , """""" ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(lowercase__ ) as cl: logger.warning(lowercase__ ) self.assertEqual(cl.out , msg + """\n""" ) # restore to the original level logging.set_verbosity(lowercase__ ) @mockenv(TRANSFORMERS_VERBOSITY="""error""" ) def __lowerCamelCase ( self ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() # this action activates the env var __a : Tuple = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) __a : int = os.getenv("""TRANSFORMERS_VERBOSITY""" , lowercase__ ) __a : Dict = logging.log_levels[env_level_str] __a : str = logging.get_verbosity() self.assertEqual( lowercase__ , lowercase__ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level __a : Optional[int] = "" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="""super-error""" ) def __lowerCamelCase ( self ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() __a : List[Any] = logging.logging.getLogger() with CaptureLogger(lowercase__ ) as cl: # this action activates the env var logging.get_logger("""transformers.models.bart.tokenization_bart""" ) self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out ) # no need to restore as nothing was changed def __lowerCamelCase ( self ): '''simple docstring''' transformers.utils.logging._reset_library_root_logger() __a : str = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) __a : List[Any] = "Testing 1, 2, 3" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ): # nothing should be logged as env var disables this method with 
CaptureLogger(lowercase__ ) as cl: logger.warning_advice(lowercase__ ) self.assertEqual(cl.out , """""" ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(lowercase__ ) as cl: logger.warning_advice(lowercase__ ) self.assertEqual(cl.out , msg + """\n""" ) def _snake_case ( ) -> Optional[int]: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
702
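A minimal sketch of the environment-variable behaviour the test above exercises. The variable has to be set before transformers configures its root logger for the first time in the process:

import os
os.environ["TRANSFORMERS_VERBOSITY"] = "error"  # read once, at first logger setup

from transformers.utils import logging as hf_logging

assert hf_logging.get_verbosity() == hf_logging.log_levels["error"]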
'''simple docstring''' import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def _snake_case ( lowercase , lowercase , lowercase ) -> Any: # Construct model if gpta_config_file == "": __a : Dict = GPTaConfig() else: __a : Optional[Any] = GPTaConfig.from_json_file(lowercase ) __a : Union[str, Any] = GPTaModel(lowercase ) # Load weights from numpy load_tf_weights_in_gpta(lowercase , lowercase , lowercase ) # Save pytorch-model __a : Optional[int] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME __a : Dict = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(model.state_dict() , lowercase ) print(F"""Save configuration file to {pytorch_config_dump_path}""" ) with open(lowercase , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--gpt2_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained OpenAI model. \n' 'This specifies the model architecture.' ), ) __SCREAMING_SNAKE_CASE : Dict = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
697
0
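Illustrative direct call of the conversion function above; the argument order follows the invocation at the bottom of the script, and all paths are placeholders:

convert_gpta_checkpoint_to_pytorch(
    "/path/to/tf_checkpoint",  # TensorFlow checkpoint to read
    "",                        # config JSON; an empty string falls back to the stock GPT-2 config
    "./gpt2-pytorch",          # folder that will receive pytorch_model.bin and config.json
)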
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable __SCREAMING_SNAKE_CASE : Any = { 'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'], 'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple = [ 'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoXJapaneseForCausalLM', 'GPTNeoXJapaneseLayer', 'GPTNeoXJapaneseModel', 'GPTNeoXJapanesePreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
703
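A toy sketch (standard library only, much simplified relative to the real `_LazyModule`) of the deferred-import pattern the init file above relies on: submodules listed in the import structure are only imported when one of their attributes is first touched:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. {"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"]}
        self._attr_to_submodule = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_submodule[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again for this name
        return value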
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE__ : @staticmethod def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 ) self.assertGreater(len(__UpperCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( __UpperCamelCase , { """score""": ANY(__UpperCamelCase ), """label""": ANY(__UpperCamelCase ), """box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )}, } , ) import datasets __a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) __a : Tuple = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] __a : Any = object_detector(__UpperCamelCase , threshold=0.0 ) self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) ) for outputs in batch_outputs: self.assertGreater(len(__UpperCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( __UpperCamelCase , { """score""": ANY(__UpperCamelCase ), """label""": ANY(__UpperCamelCase ), """box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @require_torch def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3""" __a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase ) __a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase ) __a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase ) __a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ] , ) __a : Union[str, Any] = object_detector( [ 
"""http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], ] , ) @require_torch @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : str = """facebook/detr-resnet-50""" __a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase ) __a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase ) __a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase ) __a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) __a : Optional[Any] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def __lowerCamelCase 
( self ): '''simple docstring''' __a : int = """facebook/detr-resnet-50""" __a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase ) __a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) __a : List[str] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = 0.9_9_8_5 __a : Union[str, Any] = """facebook/detr-resnet-50""" __a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase ) __a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) @require_torch @require_pytesseract @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : str = """Narsil/layoutlmv3-finetuned-funsd""" __a : List[Any] = 0.9_9_9_3 __a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase ) __a : List[str] = 
object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, {"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, ] , )
697
0
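The `threshold` argument exercised in the last test above is just a score cut-off over the pipeline's output dicts; a minimal post-filtering sketch:

def filter_detections(detections, threshold=0.9985):
    """Keep only detections whose score clears the threshold."""
    return [d for d in detections if d["score"] >= threshold]

detections = [
    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
]
assert [d["label"] for d in filter_detections(detections)] == ["cat"]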
'''simple docstring''' import functools def _snake_case ( lowercase , lowercase ) -> Optional[Any]: __a : Tuple = len(lowercase ) __a : Any = len(lowercase ) @functools.cache def min_distance(lowercase , lowercase ) -> int: # if the first word's index overflows - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if the second word's index overflows - delete all from the first word if indexa >= len_worda: return len_worda - indexa __a : Union[str, Any] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowercase ) , 1 + min_distance(lowercase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
704
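A readable, self-contained version of the memoised edit-distance recursion above (the dump's obfuscated identifiers do not run as-is, so the names here are restored by hand):

import functools

def edit_distance(word1: str, word2: str) -> int:
    @functools.cache
    def go(i: int, j: int) -> int:
        if i == len(word1):
            return len(word2) - j            # insert the rest of word2
        if j == len(word2):
            return len(word1) - i            # delete the rest of word1
        diff = int(word1[i] != word2[j])
        return min(1 + go(i + 1, j),         # delete
                   1 + go(i, j + 1),         # insert
                   diff + go(i + 1, j + 1))  # match / substitute
    return go(0, 0)

assert edit_distance("kitten", "sitting") == 3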
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : List[str] = { 'configuration_blenderbot_small': [ 'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotSmallConfig', 'BlenderbotSmallOnnxConfig', ], 'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : List[str] = [ 'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[int] = [ 'TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[Any] = [ 'FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
697
0
"""Rescale data to [0, 1] (normalization) or to zero mean and unit variance (standardization)."""
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
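Quick checks for both transforms; with [2, 4, 6] the min is 2, the max is 6, the mean is 4, and the sample standard deviation is 2:

>>> normalization([2, 4, 6])
[0.0, 0.5, 1.0]
>>> standardization([2, 4, 6])
[-1.0, 0.0, 1.0]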
705
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset from utils import logger class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Any = params __a : Optional[Any] = np.array(__UpperCamelCase ) __a : Union[str, Any] = np.array([len(__UpperCamelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __UpperCamelCase ): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self ): '''simple docstring''' return len(self.lengths ) def __lowerCamelCase ( self ): '''simple docstring''' assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = self.params.max_model_input_size __a : Union[str, Any] = self.lengths > max_len logger.info(f"""Splitting {sum(__UpperCamelCase )} too long sequences.""" ) def divide_chunks(__UpperCamelCase , __UpperCamelCase ): return [l[i : i + n] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase )] __a : int = [] __a : Union[str, Any] = [] if self.params.mlm: __a , __a : Any = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: __a , __a : str = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __a : Any = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __a : int = np.insert(__UpperCamelCase , 0 , __UpperCamelCase ) if sub_s[-1] != sep_id: __a : str = np.insert(__UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase ) assert len(__UpperCamelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(__UpperCamelCase ) new_tok_ids.extend(__UpperCamelCase ) new_lengths.extend([len(__UpperCamelCase ) for l in sub_seqs] ) __a : Dict = np.array(__UpperCamelCase ) __a : Tuple = np.array(__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = len(self ) __a : List[str] = self.lengths > 11 __a : int = self.token_ids[indices] __a : Union[str, Any] = self.lengths[indices] __a : Any = len(self ) logger.info(f"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def __lowerCamelCase ( self ): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: __a : List[str] = self.params.special_tok_ids["""unk_token"""] __a : str = len(self ) __a : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __a : Optional[Any] = (unk_occs / self.lengths) < 0.5 __a : List[str] = self.token_ids[indices] __a : Optional[int] = self.lengths[indices] __a : Any = len(self ) logger.info(f"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def __lowerCamelCase ( self ): '''simple docstring''' if not self.params.is_master: return logger.info(f"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = 
self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' __a : List[str] = [t[0] for t in batch] __a : str = [t[1] for t in batch] assert len(__UpperCamelCase ) == len(__UpperCamelCase ) # Max for paddings __a : Optional[int] = max(__UpperCamelCase ) # Pad token ids if self.params.mlm: __a : int = self.params.special_tok_ids["""pad_token"""] else: __a : Tuple = self.params.special_tok_ids["""unk_token"""] __a : Any = [list(t.astype(__UpperCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(__UpperCamelCase )) for t in token_ids] assert len(tk_ ) == len(__UpperCamelCase ) assert all(len(__UpperCamelCase ) == max_seq_len_ for t in tk_ ) __a : Any = torch.tensor(tk_ ) # (bs, max_seq_len_) __a : Optional[Any] = torch.tensor(__UpperCamelCase ) # (bs) return tk_t, lg_t
697
0
"""A minimal two-cluster self-organizing map (Kohonen network)."""
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by squared Euclidean distance (the smaller distance wins)."""
        da = 0.0
        db = 0.0
        for i in range(len(sample)):
            da += math.pow(sample[i] - weights[0][i], 2)
            db += math.pow(sample[i] - weights[1][i], 2)
        return 0 if da < db else 1

    def update(
        self, weights: list[list[float]], sample: list[int], j: int, alpha: float
    ) -> list[list[float]]:
        """Pull the winning vector j toward the sample by learning rate alpha."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
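A worked check of get_winner as fixed above: with the initial weights and sample [1, 1, 0, 0], the squared distances are da = 0.64 + 0.16 + 0.25 + 0.81 = 1.86 and db = 0.04 + 0.36 + 0.49 + 0.09 = 0.98, so cluster 1 (the closer one) wins before any training:

>>> SelfOrganizingMap().get_winner([[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]], [1, 1, 0, 0])
1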
706
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = "" lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' super().__init__(self , **__UpperCamelCase ) __a : int = repo_info __a : int = token __a : Any = None def __lowerCamelCase ( self ): '''simple docstring''' if self.dir_cache is None: __a : Union[str, Any] = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __a : List[str] = { """name""": hf_file.rfilename, """size""": None, """type""": """file""", } self.dir_cache.update( { str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ): '''simple docstring''' if not isinstance(self.repo_info , __UpperCamelCase ): raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" ) __a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha ) return fsspec.open( __UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open() def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' self._get_dirs() __a : str = self._strip_protocol(__UpperCamelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(__UpperCamelCase ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ): '''simple docstring''' self._get_dirs() __a : int = PurePosixPath(path.strip("""/""" ) ) __a : List[str] = {} for p, f in self.dir_cache.items(): __a : str = PurePosixPath(p.strip("""/""" ) ) __a : Optional[int] = p.parent if root == path: __a : List[str] = f __a : str = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
697
0
"""Prim's minimum spanning tree algorithm: a plain O(V^2) version and a binary-heap version."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Vertex of a weighted undirected graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
        hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
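A small usage sketch for the repaired module above; the graph is hypothetical and the MST edges were traced by hand (connect takes 1-based vertex labels):

>>> graph = [Vertex(i) for i in range(4)]
>>> connect(graph, 1, 2, 1)
>>> connect(graph, 2, 3, 2)
>>> connect(graph, 3, 4, 3)
>>> connect(graph, 1, 3, 4)
>>> connect(graph, 2, 4, 7)
>>> prim(graph, graph[0])
[(2, 1), (3, 2), (4, 3)]
>>> list(prim_heap(graph, graph[0]))
[(2, 1), (3, 2), (4, 3)]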
707
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ): '''simple docstring''' __a : List[str] = parent __a : Tuple = batch_size __a : str = image_size __a : int = patch_size __a : Dict = num_channels __a : int = is_training __a : Dict = use_labels __a : Union[str, Any] = hidden_size __a : Dict = num_hidden_layers __a : Dict = backbone_out_indices __a : Optional[int] = num_attention_heads __a : List[str] = intermediate_size __a : Optional[Any] = hidden_act __a : Dict = hidden_dropout_prob __a : Tuple = attention_probs_dropout_prob __a : Any = initializer_range __a : Any = num_labels __a : Optional[Any] = backbone_featmap_shape __a : List[Any] = scope __a : List[str] = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) __a : Union[str, Any] = (image_size // patch_size) ** 2 __a : List[str] = num_patches + 1 def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Union[str, Any] = None if self.use_labels: __a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __a : Tuple = self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [96, 192, 384, 768], """num_groups""": 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , ) def __lowerCamelCase ( self , 
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = DPTModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : List[str] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[str] = self.num_labels __a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : Tuple = model(__UpperCamelCase ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Dict = self.num_labels __a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : str = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.prepare_config_and_inputs() __a , __a , __a : Tuple = config_and_inputs __a : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () lowercase__ = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = DPTModelTester(self ) __a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""DPT does not use inputs_embeds""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : str = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __a : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Any = model_class(__UpperCamelCase ) __a : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : int = [*signature.parameters.keys()] __a : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase ) def 
__lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() __a : List[Any] = True if model_class in get_values(__UpperCamelCase ): continue __a : str = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.train() __a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) __a : List[Any] = model(**__UpperCamelCase ).loss loss.backward() def __lowerCamelCase ( self ): '''simple docstring''' for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __a : Any = False __a : Dict = True if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing: continue __a : Any = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.gradient_checkpointing_enable() model.train() __a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) __a : Dict = model(**__UpperCamelCase ).loss loss.backward() def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common() __a : Any = _config_zero_init(__UpperCamelCase ) for model_class in self.all_model_classes: __a : Any = model_class(config=__UpperCamelCase ) # Skip the check for the backbone __a : Optional[Any] = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": __a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: __a : int = DPTModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : int = self.model_tester.prepare_config_and_inputs_for_common() __a : Optional[int] = """add""" with self.assertRaises(__UpperCamelCase ): __a : int = DPTForDepthEstimation(__UpperCamelCase ) def _snake_case ( ) -> Any: __a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" ) __a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase ) __a : Union[str, Any] = prepare_img() __a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): __a : Optional[Any] = 
model(**__UpperCamelCase ) __a : int = outputs.predicted_depth # verify the predicted depth __a : Any = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , __UpperCamelCase ) __a : int = torch.tensor( [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) )
697
0
"""Public re-exports for the utils package."""
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
708
'''simple docstring''' import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) __a : Optional[int] = Vector() def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Vector([1, 2, 3, 4] ) self.assertEqual(len(__UpperCamelCase ) , 4 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = Vector([1, 2] ) __a : List[str] = Vector([1, 2, 3, 4, 5] ) __a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) __a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) __a : Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = Vector([1, 2, 3] ) __a : Any = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Vector([1, 2, 3] ) __a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product __a : Union[str, Any] = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" ) self.assertEqual((a * b) , 0 ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) __a : Optional[int] = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : int = Vector([1, 0, 0, 0, 0, 0] ) __a : Any = x.copy() self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): 
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) __a : List[Any] = Vector([1, 2, 3] ) self.assertEqual("""(14,32,50)""" , str(a * x ) ) self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual( """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
697
0
'''simple docstring''' import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib __SCREAMING_SNAKE_CASE : List[str] = threading.Lock() __SCREAMING_SNAKE_CASE : Optional[logging.Handler] = None __SCREAMING_SNAKE_CASE : List[Any] = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } __SCREAMING_SNAKE_CASE : Optional[int] = logging.WARNING __SCREAMING_SNAKE_CASE : List[Any] = True def _snake_case ( ) -> List[Any]: __a : Optional[Any] = os.getenv("""TRANSFORMERS_VERBOSITY""" , _A ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """ F"""has to be one of: { ", ".join(log_levels.keys() ) }""" ) return _default_log_level def _snake_case ( ) -> str: return __name__.split(""".""" )[0] def _snake_case ( ) -> logging.Logger: return logging.getLogger(_get_library_name() ) def _snake_case ( ) -> None: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return __a : Optional[int] = logging.StreamHandler() # Set sys.stderr as stream. __a : List[str] = sys.stderr.flush # Apply our default configuration to the library root logger. __a : Optional[Any] = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) __a : Union[str, Any] = False def _snake_case ( ) -> None: global _default_handler with _lock: if not _default_handler: return __a : Tuple = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) __a : Dict = None def _snake_case ( ) -> Optional[int]: return log_levels def _snake_case ( lowercase = None ) -> logging.Logger: if name is None: __a : str = _get_library_name() _configure_library_root_logger() return logging.getLogger(_A ) def _snake_case ( ) -> int: _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def _snake_case ( lowercase ) -> None: _configure_library_root_logger() _get_library_root_logger().setLevel(_A ) def _snake_case ( ) -> int: return set_verbosity(_A ) def _snake_case ( ) -> List[Any]: return set_verbosity(_A ) def _snake_case ( ) -> Tuple: return set_verbosity(_A ) def _snake_case ( ) -> List[Any]: return set_verbosity(_A ) def _snake_case ( ) -> None: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def _snake_case ( ) -> None: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def _snake_case ( lowercase ) -> None: _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(_A ) def _snake_case ( lowercase ) -> None: _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(_A ) def _snake_case ( ) -> None: _configure_library_root_logger() __a : Optional[Any] = False def _snake_case ( ) -> None: _configure_library_root_logger() __a : int = True def _snake_case ( ) -> None: __a : 
Dict = _get_library_root_logger().handlers for handler in handlers: __a : Dict = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" ) handler.setFormatter(_A ) def _snake_case ( ) -> None: __a : Optional[int] = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(_A ) def _snake_case ( self , *lowercase , **lowercase ) -> Union[str, Any]: __a : Union[str, Any] = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , _A ) if no_advisory_warnings: return self.warning(*_A , **_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = warning_advice @functools.lru_cache(_A ) def _snake_case ( self , *lowercase , **lowercase ) -> str: self.warning(*_A , **_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = warning_once class SCREAMING_SNAKE_CASE__ : def __init__( self , *__UpperCamelCase , **__UpperCamelCase ): # pylint: disable=unused-argument '''simple docstring''' __a : Tuple = args[0] if args else None def __iter__( self ): '''simple docstring''' return iter(self._iterator ) def __getattr__( self , __UpperCamelCase ): '''simple docstring''' def empty_fn(*__UpperCamelCase , **__UpperCamelCase ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ): '''simple docstring''' return self def __exit__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' return class SCREAMING_SNAKE_CASE__ : def __call__( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) else: return EmptyTqdm(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' __a : Optional[int] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm.get_lock() __SCREAMING_SNAKE_CASE : Optional[Any] = _tqdm_cls() def _snake_case ( ) -> bool: global _tqdm_active return bool(_tqdm_active ) def _snake_case ( ) -> Any: global _tqdm_active __a : Tuple = True hf_hub_utils.enable_progress_bars() def _snake_case ( ) -> Tuple: global _tqdm_active __a : Optional[int] = False hf_hub_utils.disable_progress_bars()
709
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __SCREAMING_SNAKE_CASE : List[str] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) __SCREAMING_SNAKE_CASE : Optional[Any] = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) __SCREAMING_SNAKE_CASE : Tuple = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) __SCREAMING_SNAKE_CASE : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) __SCREAMING_SNAKE_CASE : Optional[int] = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) __SCREAMING_SNAKE_CASE : int = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH 
JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) __SCREAMING_SNAKE_CASE : int = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _snake_case ( ) -> List[str]: __a , __a : List[Any] = randrange(len(lowercase ) ), randrange(len(lowercase ) ) __a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] __a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _snake_case ( lowercase = 1_0_0 ) -> Any: return (generate_random_hand() for _ in range(lowercase )) @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> int: assert PokerHand(lowercase )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Any: assert PokerHand(lowercase )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , lowercase ) def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]: __a : Union[str, Any] = PokerHand(lowercase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Optional[int]: assert PokerHand(lowercase )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Union[str, Any]: assert PokerHand(lowercase )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , lowercase ) def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]: assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def _snake_case ( lowercase , lowercase , lowercase ) -> int: assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected def _snake_case ( ) -> Union[str, Any]: __a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS] __a : Optional[int] = poker_hands.copy() shuffle(lowercase ) __a : List[str] = chain(sorted(lowercase ) ) for index, hand in enumerate(lowercase ): assert hand == poker_hands[index] def _snake_case ( ) -> List[str]: # Test that five high straights are compared correctly. __a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=lowercase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _snake_case ( ) -> List[str]: # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
__a : Dict = PokerHand("""2C 4S AS 3D 5C""" ) __a : Dict = True __a : Optional[int] = [5, 4, 3, 2, 1_4] for _ in range(1_0 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _snake_case ( ) -> Dict: # Problem number 54 from Project Euler # Testing from poker_hands.txt file __a : Tuple = 0 __a : int = os.path.abspath(os.path.dirname(lowercase ) ) __a : Union[str, Any] = os.path.join(lowercase , """poker_hands.txt""" ) with open(lowercase ) as file_hand: for line in file_hand: __a : Union[str, Any] = line[:1_4].strip() __a : Optional[Any] = line[1_5:].strip() __a , __a : List[str] = PokerHand(lowercase ), PokerHand(lowercase ) __a : str = player.compare_with(lowercase ) if output == "Win": answer += 1 assert answer == 3_7_6
697
0
"""Topological sort of a directed acyclic graph via depth-first search."""
# DAG: a -> c, a -> b, b -> d, b -> e
edges: dict[str, list[str]] = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices: list[str] = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
710
"""Lazy-import init for the FocalNet model family."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
697
0
"""Prim's minimum spanning tree algorithm (duplicate of the earlier row): plain and heap-based versions."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Vertex of a weighted undirected graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
        hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
711
"""Binary search helpers: hand-rolled bisect, stdlib bisect, iterative and recursive search."""
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(
    sorted_collection: list[int], item: int, left: int, right: int
) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
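Doctest-style checks for the repaired search helpers (values traced by hand against the definitions):

>>> binary_search([0, 5, 7, 10, 15], 5)
1
>>> binary_search([0, 5, 7, 10, 15], 6) is None
True
>>> binary_search_by_recursion([0, 5, 7, 10, 15], 15, 0, 4)
4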
697
0
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _snake_case ( lowercase , lowercase=0.9_9_9 , lowercase="cosine" , ) -> Tuple: if alpha_transform_type == "cosine": def alpha_bar_fn(lowercase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowercase ): return math.exp(t * -1_2.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) __a : Optional[Any] = [] for i in range(__snake_case ): __a : str = i / num_diffusion_timesteps __a : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) ) return torch.tensor(__snake_case , dtype=torch.floataa ) class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase ): lowercase__ = [e.name for e in KarrasDiffusionSchedulers] lowercase__ = 2 @register_to_config def __init__( self , __UpperCamelCase = 1000 , __UpperCamelCase = 0.0_0_0_8_5 , __UpperCamelCase = 0.0_1_2 , __UpperCamelCase = "linear" , __UpperCamelCase = None , __UpperCamelCase = "epsilon" , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = 1.0 , __UpperCamelCase = "linspace" , __UpperCamelCase = 0 , ): '''simple docstring''' if trained_betas is not None: __a : Any = torch.tensor(__a , dtype=torch.floataa ) elif beta_schedule == "linear": __a : Tuple = torch.linspace(__a , __a , __a , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __a : List[str] = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __a : Dict = betas_for_alpha_bar(__a , alpha_transform_type="""cosine""" ) elif beta_schedule == "exp": __a : Optional[int] = betas_for_alpha_bar(__a , alpha_transform_type="""exp""" ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) __a : str = 1.0 - self.betas __a : Any = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(__a , __a , __a ) __a : Tuple = use_karras_sigmas def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=None ): '''simple docstring''' if schedule_timesteps is None: __a : Optional[Any] = self.timesteps __a : List[Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: __a : Union[str, Any] = 1 if len(__a ) > 1 else 0 else: __a : int = timestep.cpu().item() if torch.is_tensor(__a ) else timestep __a : str = self._index_counter[timestep_int] return indices[pos].item() @property def __lowerCamelCase ( self ): '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' __a : List[Any] = self.index_for_timestep(__a ) __a : Union[str, Any] = self.sigmas[step_index] __a : Any = sample / ((sigma**2 + 1) ** 0.5) return sample def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , ): '''simple docstring''' __a : str = num_inference_steps __a : Optional[int] = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __a : List[Any] = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a )[::-1].copy() elif self.config.timestep_spacing == "leading": __a : str = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __a : Optional[Any] = (np.arange(0 , __a ) * step_ratio).round()[::-1].copy().astype(__a ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __a : Tuple = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __a : Dict = (np.arange(__a , 0 , -step_ratio )).round().copy().astype(__a ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.""" ) __a : Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __a : int = np.log(__a ) __a : Dict = np.interp(__a , np.arange(0 , len(__a ) ) , __a ) if self.config.use_karras_sigmas: __a : List[str] = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps ) __a : Optional[Any] = np.array([self._sigma_to_t(__a , __a ) for sigma in sigmas] ) __a : Any = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __a : Tuple = torch.from_numpy(__a ).to(device=__a ) __a : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) __a : Any = torch.from_numpy(__a ) __a : int = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(__a ).startswith("""mps""" ): # mps does not support float64 __a : Optional[int] = timesteps.to(__a , dtype=torch.floataa ) else: __a : List[Any] = timesteps.to(device=__a ) # empty dt and derivative __a : str = None __a : List[str] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __a : Optional[int] = defaultdict(__a ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[int] = np.log(__a ) # get distribution __a : Optional[int] = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range __a : List[Any] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) __a : Optional[int] = low_idx + 1 __a : List[Any] = log_sigmas[low_idx] __a : int = log_sigmas[high_idx] # interpolate sigmas __a : str = (low - log_sigma) / (low - high) __a : int = np.clip(__a , 0 , 1 ) # transform interpolation to time range __a : List[str] = (1 - w) * low_idx + w * high_idx __a : Optional[int] = t.reshape(sigma.shape ) return t def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[Any] = in_sigmas[-1].item() __a : Tuple = in_sigmas[0].item() __a : List[str] = 7.0 # 7.0 is the value used in the paper __a : Dict = np.linspace(0 , 1 , __a ) __a : Any = sigma_min ** (1 / rho) __a : Tuple = sigma_max ** (1 / rho) __a : Optional[int] = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def __lowerCamelCase ( self ): '''simple docstring''' return self.dt is None def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , ): '''simple docstring''' __a : Tuple = self.index_for_timestep(__a ) # advance index counter by 1 __a : Any = timestep.cpu().item() if torch.is_tensor(__a ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __a : List[str] = self.sigmas[step_index] __a : Union[str, Any] = self.sigmas[step_index + 1] else: # 2nd order / Heun's method __a : Tuple = self.sigmas[step_index - 1] __a : List[Any] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __a : Any = 0 __a : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __a : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_next __a : List[Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __a : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_next __a : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": __a : List[Any] = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: __a : Union[str, Any] = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __a : Union[str, Any] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __a : Optional[int] = sigma_next - sigma_hat # store for 2nd order step __a : List[str] = derivative __a : int = dt __a : int = sample else: # 2. 2nd order / Heun's method __a : int = (sample - pred_original_sample) / sigma_next __a : str = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample __a : List[str] = self.dt __a : Dict = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" __a : Dict = None __a : List[str] = None __a : Tuple = None __a : Union[str, Any] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__a ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' __a : Optional[int] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__a ): # mps does not support float64 __a : List[str] = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __a : Optional[Any] = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __a : str = self.timesteps.to(original_samples.device ) __a : int = timesteps.to(original_samples.device ) __a : List[Any] = [self.index_for_timestep(__a , __a ) for t in timesteps] __a : Tuple = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __a : Any = sigma.unsqueeze(-1 ) __a : Optional[Any] = original_samples + noise * sigma return noisy_samples def __len__( self ): '''simple docstring''' return self.config.num_train_timesteps
712
"""Project Euler problem 205: https://projecteuler.net/problem=205"""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
697
0
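The Heun scheduler code above converts its sigma range to the Karras et al. (2022) noise schedule by interpolating in rho-space with rho = 7. A minimal NumPy sketch of that one step, pulled out of the scheduler class; the function name and arguments here are illustrative, not part of the diffusers API:

```python
import numpy as np


def karras_sigmas(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    """Power-law interpolation between sigma_max and sigma_min (Karras et al., 2022)."""
    ramp = np.linspace(0, 1, num_steps)  # 0 at the noisiest step, 1 at the cleanest
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # ramp = 0 yields sigma_max, ramp = 1 yields sigma_min
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho


# Example: a 10-step schedule from sigma = 80 down to sigma = 0.1
print(karras_sigmas(0.1, 80.0, 10))
```

The rho = 7 exponent concentrates steps at low noise levels, which is why the scheduler applies this conversion before appending the trailing 0.0 sigma.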
"""Base85 encoding and decoding via the standard library."""
import base64


def base85_encode(string: str) -> bytes:
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
713
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' super().__init__() self.register_modules( vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , ) def __lowerCamelCase ( self , __UpperCamelCase = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __a : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' self.enable_attention_slicing(__UpperCamelCase ) @torch.no_grad() def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' if isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Union[str, Any] = 1 elif isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Tuple = len(__UpperCamelCase ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__UpperCamelCase )}.""" ) # get prompt text embeddings __a : Tuple = self.tokenizer( __UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) __a : Union[str, Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) __a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: __a : int = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __a 
, __a , __a : Union[str, Any] = text_embeddings.shape __a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 ) __a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __a : Any = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __a : List[str] if negative_prompt is None: __a : Optional[Any] = [""""""] elif type(__UpperCamelCase ) is not type(__UpperCamelCase ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !=""" f""" {type(__UpperCamelCase )}.""" ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Any = [negative_prompt] elif batch_size != len(__UpperCamelCase ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" """ the batch size of `prompt`.""" ) else: __a : Tuple = negative_prompt __a : Any = text_input_ids.shape[-1] __a : List[str] = self.tokenizer( __UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , ) __a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __a : List[str] = uncond_embeddings.shape[1] __a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 ) __a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) __a : int = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __a : Any = torch.randn( __UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device ) __a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to( self.device ) else: __a : Optional[int] = torch.randn( __UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) __a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) else: if latents_reference.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __a : Optional[Any] = latents_reference.to(self.device ) __a : str = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images __a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2 __a : int = (latents_shape[2] - latents_shape_reference[2]) // 2 __a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx __a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy __a : Optional[Any] = 0 if dx < 0 else dx __a : Optional[Any] = 0 if dy < 0 else dy __a : Optional[int] = max(-dx , 0 ) __a : Optional[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() __a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(__UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __a : Dict = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __a : Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __a : Optional[Any] = {} if accepts_eta: __a : Union[str, Any] = eta for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance __a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase ) # predict the noise residual __a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample # perform guidance if do_classifier_free_guidance: __a , __a : List[str] = noise_pred.chunk(2 ) __a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents __a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample __a : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: __a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to( self.device ) __a , __a : int = self.safety_checker( images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: __a : Optional[int] = None if output_type == "pil": __a : str = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
697
0
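The pipeline in the context above runs the UNet once on a doubled latent batch and then blends the unconditional and text-conditioned noise predictions. A minimal PyTorch sketch of just that guidance arithmetic (tensor shapes and names are illustrative):

```python
import torch


def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    """Split a doubled-batch prediction and apply classifier-free guidance."""
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    # guidance_scale = 1.0 reduces to the plain text-conditioned prediction
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)


# Dummy doubled batch: first half unconditional, second half text-conditioned
pred = torch.randn(2, 4, 64, 64)
print(apply_cfg(pred, guidance_scale=7.5).shape)  # torch.Size([1, 4, 64, 64])
```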
"""Dijkstra's two-stack algorithm for evaluating fully parenthesised
arithmetic expressions. Author: Alexander Joslin."""
import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: operands go onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: pop one operator and two operands, apply, push the result
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the final value is the lone entry on the operand stack
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
714
"""Max pooling and average pooling over square matrices with NumPy."""
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
697
0
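The loop-based pooling in the row above can also be written without explicit index bookkeeping. A sketch of the same max pooling using NumPy's sliding_window_view, assuming NumPy >= 1.20; this is an alternative formulation, not part of the original module:

```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view


def maxpool2d(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Max over every size x size window, subsampled by stride."""
    windows = sliding_window_view(arr, (size, size))  # (H-size+1, W-size+1, size, size)
    return windows[::stride, ::stride].max(axis=(2, 3))


arr = np.arange(16.0).reshape(4, 4)
print(maxpool2d(arr, size=3, stride=2))  # [[10.]] (same as the loop version)
```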
"""Sieve of Eratosthenes: all primes up to and including ``num``."""
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
715
"""Measure a single qubit on the Aer simulator and return the counts."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
697
0
"""Square root via the Newton-Raphson iteration on f(x) = x^2 - a."""
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    # repeatedly square the starting point until it exceeds a
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
716
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Any = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k', 'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v', 'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q', 'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u', 'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v', 'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out', 'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos', 'self_attn.rotary_emb': 'encoder.embed_positions', 'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm', 'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1', 'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2', 'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv', 'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm', 'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm', 'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense', 'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense', 'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm', 'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense', 'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense', 'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __SCREAMING_SNAKE_CASE : Optional[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: for attribute in key.split(""".""" ): __a : str = getattr(lowercase , lowercase ) if weight_type is not None: __a : Dict = getattr(lowercase , lowercase ).shape else: __a : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a : Any = value elif weight_type == "weight_g": __a : int = value elif weight_type == "weight_v": __a : int = value elif weight_type == "bias": __a : List[Any] = value elif weight_type == "running_mean": __a : Union[str, Any] = value elif weight_type == "running_var": __a : Tuple = value elif weight_type == "num_batches_tracked": __a : Optional[int] = value elif weight_type == "inv_freq": __a : List[str] = value else: __a : List[str] = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( lowercase , lowercase , lowercase ) -> Dict: __a : Dict = [] __a : Dict = fairseq_model.state_dict() __a : Tuple = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __a : int = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , ) __a : List[Any] = True else: for key, mapped_key in MAPPING.items(): __a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __a : str = True if "*" in mapped_key: __a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2] __a : List[Any] = mapped_key.replace("""*""" , lowercase ) if "pos_bias_u" in name: __a : Union[str, Any] = None elif "pos_bias_v" in name: __a : List[Any] = None elif "weight_g" in name: __a : List[Any] = """weight_g""" elif "weight_v" in name: __a : List[Any] = """weight_v""" elif "bias" in name: __a : Optional[int] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __a : str = """weight""" elif "running_mean" in name: __a : List[str] = """running_mean""" elif "inv_freq" in name: __a : Dict = """inv_freq""" elif "running_var" in name: __a : Union[str, Any] = """running_var""" elif "num_batches_tracked" in name: __a : int = """num_batches_tracked""" else: __a : Optional[int] = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]: __a : Optional[Any] = full_name.split("""conv_layers.""" )[-1] __a : Union[str, Any] = name.split(""".""" ) __a : Optional[Any] = int(items[0] ) __a : int = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a : Dict = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __a : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __a : Union[str, Any] = value logger.info(F"""Feat 
extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase ) @torch.no_grad() def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]: if config_path is not None: __a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" ) else: __a : Optional[int] = WavaVecaConformerConfig() if "rope" in checkpoint_path: __a : Optional[Any] = """rotary""" if is_finetuned: if dict_path: __a : List[Any] = Dictionary.load(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __a : int = target_dict.pad_index __a : List[str] = target_dict.bos_index __a : str = target_dict.eos_index __a : Dict = len(target_dict.symbols ) __a : Any = os.path.join(lowercase , """vocab.json""" ) if not os.path.isdir(lowercase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) __a : Dict = target_dict.indices # fairseq has the <pad> and <s> switched __a : Optional[Any] = 0 __a : List[Any] = 1 with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowercase , lowercase ) __a : int = WavaVecaCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , ) __a : Optional[int] = True if config.feat_extract_norm == """layer""" else False __a : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , ) __a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) __a : List[str] = WavaVecaConformerForCTC(lowercase ) else: __a : Optional[int] = WavaVecaConformerForPreTraining(lowercase ) if is_finetuned: __a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" ) __a : Tuple = fairseq.tasks.setup_task(lowercase ) __a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase ) __a : Any = model[0].eval() recursively_load_weights(lowercase , lowercase , not is_finetuned ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
697
0
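The conversion script in the row above renames fairseq checkpoint keys through a MAPPING table in which "*" stands for the layer index. A condensed sketch of that wildcard substitution; the two mapping entries are copied from the table above, everything else is illustrative:

```python
from typing import Optional

# Two entries from the MAPPING table above; "*" marks the layer index slot.
MAPPING = {
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
}


def rename_key(name: str) -> Optional[str]:
    """Return the HF-style key for a fairseq key, or None if unmapped."""
    for key, mapped_key in MAPPING.items():
        if key in name:
            # The layer index sits just before the matched fragment,
            # e.g. "encoder.layers.3.self_attn.linear_q.weight"
            layer_index = name.split(key)[0].split(".")[-2]
            return mapped_key.replace("*", layer_index)
    return None


print(rename_key("encoder.layers.3.self_attn.linear_q.weight"))
# encoder.layers.3.self_attn.linear_q
```

The real script additionally records whether the tensor is a weight, bias, or batch-norm statistic and assigns it with set_recursively; the sketch covers only the key rewrite.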
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ): lowercase__ = ['''audio_values''', '''audio_mask'''] def __init__( self , __UpperCamelCase=2048 , __UpperCamelCase=1 , __UpperCamelCase=[16, 16] , __UpperCamelCase=128 , __UpperCamelCase=4_4100 , __UpperCamelCase=86 , __UpperCamelCase=2048 , __UpperCamelCase=0.0 , **__UpperCamelCase , ): '''simple docstring''' super().__init__( feature_size=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , padding_value=lowerCAmelCase_ , **lowerCAmelCase_ , ) __a : Tuple = spectrogram_length __a : List[Any] = num_channels __a : Optional[Any] = patch_size __a : Union[str, Any] = feature_size // self.patch_size[1] __a : Tuple = n_fft __a : Optional[int] = sampling_rate // hop_length_to_sampling_rate __a : str = sampling_rate __a : str = padding_value __a : List[str] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase_ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=lowerCAmelCase_ , norm="""slaney""" , mel_scale="""slaney""" , ).T def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = spectrogram( lowerCAmelCase_ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=8_0.0 , ) __a : List[Any] = log_spec[:, :-1] __a : Any = log_spec - 2_0.0 __a : str = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , **__UpperCamelCase , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" f""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) __a : Union[str, Any] = isinstance(lowerCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __a : List[str] = is_batched_numpy or ( isinstance(lowerCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __a : Optional[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase_ , np.ndarray ): __a : int = np.asarray(lowerCAmelCase_ , dtype=np.floataa ) elif isinstance(lowerCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __a : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __a : Dict = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __a : Dict = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , lowerCAmelCase_ ): __a : str = [np.asarray(lowerCAmelCase_ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __a : List[Any] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __a : Tuple = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __a : str = np.array(lowerCAmelCase_ ).astype(np.floataa ) # convert into correct format for padding __a : Any = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __a : List[Any] = np.ones([len(lowerCAmelCase_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __a : Optional[int] = padded_audio_features * self.padding_value for i in range(len(lowerCAmelCase_ ) ): __a : Tuple = audio_features[i] __a : Dict = feature # return as BatchFeature if return_attention_mask: __a : Tuple = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: __a : Optional[Any] = {"""audio_values""": padded_audio_features} __a : Optional[Any] = BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) return encoded_inputs
717
"""Decorator that flags a callable as experimental. The warning category is
assumed to be UserWarning, the default for warnings.warn."""
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn


# Usage:
# @experimental
# def new_feature(...): ...
697
0
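The feature extractor in the row above squeezes each dB-scaled log-mel spectrogram into roughly [-1, 1]: shift by -20 dB, divide by 40, clip to [-2, 0], add 1. The same normalization as a standalone NumPy function (the random input is only there to show the output range):

```python
import numpy as np


def normalize_log_mel(log_spec_db: np.ndarray) -> np.ndarray:
    """Map dB values so that -60 dB -> -1.0 and +20 dB -> +1.0, clipping outside."""
    shifted = log_spec_db - 20.0
    return np.clip(shifted / 40.0, -2.0, 0.0) + 1.0


spec = np.random.uniform(-100.0, 0.0, size=(128, 44))  # fake dB values
out = normalize_log_mel(spec)
print(out.min(), out.max())  # always within [-1.0, 1.0]
```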
'''simple docstring''' import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint __SCREAMING_SNAKE_CASE : Dict = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } __SCREAMING_SNAKE_CASE : List[str] = { '169M': 768, '430M': 1_024, '1B5': 2_048, '3B': 2_560, '7B': 4_096, '14B': 5_120, } def _snake_case ( lowercase ) -> List[str]: __a : List[Any] = list(state_dict.keys() ) for name in state_dict_keys: __a : Tuple = state_dict.pop(lowercase ) # emb -> embedding if name.startswith("""emb.""" ): __a : int = name.replace("""emb.""" , """embeddings.""" ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("""blocks.0.ln0""" ): __a : List[str] = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" ) # att -> attention __a : Union[str, Any] = re.sub(r"""blocks\.(\d+)\.att""" , r"""blocks.\1.attention""" , lowercase ) # ffn -> feed_forward __a : Dict = re.sub(r"""blocks\.(\d+)\.ffn""" , r"""blocks.\1.feed_forward""" , lowercase ) # time_mix_k -> time_mix_key and reshape if name.endswith(""".time_mix_k""" ): __a : Optional[int] = name.replace(""".time_mix_k""" , """.time_mix_key""" ) # time_mix_v -> time_mix_value and reshape if name.endswith(""".time_mix_v""" ): __a : List[str] = name.replace(""".time_mix_v""" , """.time_mix_value""" ) # time_mix_r -> time_mix_key and reshape if name.endswith(""".time_mix_r""" ): __a : List[Any] = name.replace(""".time_mix_r""" , """.time_mix_receptance""" ) if name != "head.weight": __a : Tuple = '''rwkv.''' + name __a : List[str] = weight return state_dict def _snake_case ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=False , lowercase=None ) -> Tuple: if tokenizer_file is None: print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" ) __a : str = 5_0_2_7_7 __a : str = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" ) else: __a : Tuple = PreTrainedTokenizerFast(tokenizer_file=lowercase ) __a : str = len(lowercase ) tokenizer.save_pretrained(lowercase ) # 2. Build the config __a : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: __a : Union[str, Any] = candidate break if size is None: raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" ) if size not in possible_sizes: raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" ) __a : Union[str, Any] = RwkvConfig( vocab_size=lowercase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(lowercase ) # 3. Download model file then convert state_dict __a : Union[str, Any] = hf_hub_download(lowercase , lowercase ) __a : List[Any] = torch.load(lowercase , map_location="""cpu""" ) __a : Tuple = convert_state_dict(lowercase ) # 4. 
Split in shards and save __a : Optional[Any] = shard_checkpoint(lowercase ) for shard_file, shard in shards.items(): torch.save(lowercase , os.path.join(lowercase , lowercase ) ) if index is not None: __a : Dict = os.path.join(lowercase , lowercase ) # Save the index as well with open(lowercase , """w""" , encoding="""utf-8""" ) as f: __a : Optional[int] = json.dumps(lowercase , indent=2 , sort_keys=lowercase ) + '''\n''' f.write(lowercase ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( """Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.""" ) __a : Tuple = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: __a : Union[str, Any] = torch.load(os.path.join(lowercase , lowercase ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(lowercase , lowercase ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" ) __a : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase ) model.push_to_hub(lowercase , max_shard_size="""2GB""" ) tokenizer.push_to_hub(lowercase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
718
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = ["input_features", "attention_mask"] def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ): '''simple docstring''' super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase ) __a : List[str] = feature_size __a : List[str] = sampling_rate __a : int = padding_value __a : Any = hop_length __a : int = win_length __a : Tuple = frame_signal_scale __a : Union[str, Any] = preemphasis_coeff __a : List[str] = mel_floor __a : Union[str, Any] = normalize_means __a : Optional[Any] = normalize_vars __a : Optional[Any] = win_function __a : Union[str, Any] = return_attention_mask __a : List[Any] = win_length * sampling_rate // 1000 __a : List[Any] = hop_length * sampling_rate // 1000 __a : Optional[Any] = optimal_fft_length(self.sample_size ) __a : Any = (self.n_fft // 2) + 1 def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' if self.win_function == "hamming_window": __a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase ) else: __a : Dict = window_function(window_length=self.sample_size , name=self.win_function ) __a : Optional[Any] = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) __a : Any = spectrogram( one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , ) return msfc_features.T def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if self.normalize_means: __a : int = x[:input_length].mean(axis=0 ) __a : str = np.subtract(__UpperCamelCase , __UpperCamelCase ) if self.normalize_vars: __a : Dict = x[:input_length].std(axis=0 ) __a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase ) if input_length < x.shape[0]: __a : Union[str, Any] = padding_value # make sure array is in float32 __a : Any = x.astype(np.floataa ) return x def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' __a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )] def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , 
__UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) __a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __a : Tuple = is_batched_numpy or ( isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ): __a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa ) elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __a : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __a : Any = [raw_speech] # extract fbank features __a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech] # convert into correct format for padding __a : Optional[Any] = BatchFeature({"""input_features""": features} ) __a : Any = self.pad( __UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) # make sure list is in array format __a : int = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] , __UpperCamelCase ): __a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features] __a : List[str] = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: __a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: __a : Optional[Any] = ( np.array(__UpperCamelCase , dtype=np.intaa ) if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) __a : int = self.normalize( padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase ) if return_tensors is not None: __a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
697
0
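The RWKV converter in the row above rewrites checkpoint keys with a few string replacements and regular expressions before sharding. A condensed sketch of that renaming pass on a toy set of keys; the patterns are the ones used above, minus the time_mix renames:

```python
import re


def convert_key(name: str) -> str:
    # emb -> embedding
    if name.startswith("emb."):
        name = name.replace("emb.", "embeddings.")
    # ln_0 -> pre_ln (only present at block 0)
    if name.startswith("blocks.0.ln0"):
        name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
    # att -> attention, ffn -> feed_forward
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    # everything except the LM head lives under the "rwkv." prefix
    if name != "head.weight":
        name = "rwkv." + name
    return name


for key in ["emb.weight", "blocks.2.att.key.weight", "head.weight"]:
    print(key, "->", convert_key(key))
# emb.weight -> rwkv.embeddings.weight
# blocks.2.att.key.weight -> rwkv.blocks.2.attention.key.weight
# head.weight -> head.weight
```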
"""Malus' law: intensity transmitted through an ideal polariser."""
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
719
"""Archimedes' principle: buoyant force on a submerged object."""
g = 9.80665  # standard gravity, m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
697
0
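A quick numeric check of the two formulas above, with the arithmetic worked by hand:

```python
import math

# Malus' law: a polariser at 60 degrees passes cos^2(60 deg) = 1/4 of the intensity
print(100.0 * math.cos(math.radians(60.0)) ** 2)  # ~25.0

# Archimedes: 0.5 m^3 fully submerged in water (rho = 1000 kg/m^3)
# F = rho * g * V = 1000 * 9.80665 * 0.5 = 4903.325 N
print(1000.0 * 9.80665 * 0.5)  # 4903.325
```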
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
720
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ): '''simple docstring''' __a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} __a : Dict = parent __a : Union[str, Any] = batch_size __a : Optional[int] = num_channels __a : Dict = min_resolution __a : List[Any] = max_resolution __a : int = do_resize __a : str = size __a : Optional[Any] = do_rescale __a : Optional[Any] = rescale_factor __a : str = do_normalize __a : Any = image_mean __a : Optional[Any] = image_std __a : Dict = do_pad def __lowerCamelCase ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ): '''simple docstring''' if not batched: __a : Union[str, Any] = image_inputs[0] if isinstance(__UpperCamelCase , Image.Image ): __a , __a : Tuple = image.size else: __a , __a : Tuple = image.shape[1], image.shape[2] if w < h: __a : Optional[int] = int(self.size["""shortest_edge"""] * h / w ) __a : Tuple = self.size["""shortest_edge"""] elif w > h: __a : Optional[Any] = self.size["""shortest_edge"""] __a : Any = int(self.size["""shortest_edge"""] * w / h ) else: __a : Any = self.size["""shortest_edge"""] __a : Optional[int] = self.size["""shortest_edge"""] else: __a : Any = [] for image in image_inputs: __a , __a : Any = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0] __a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ): lowercase__ = DetrImageProcessor if is_vision_available() else None def __lowerCamelCase ( self ): '''simple docstring''' __a : str = DetrImageProcessingTester(self ) @property def __lowerCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) ) self.assertTrue(hasattr(__UpperCamelCase , 
"""do_resize""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """size""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , __UpperCamelCase ) __a : List[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , Image.Image ) # Test not batched input __a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) __a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , np.ndarray ) # Test not batched input __a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values __a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , torch.Tensor ) # Test not batched input __a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( 
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: __a : Dict = json.loads(f.read() ) __a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target} # encode them __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" ) __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __a : Union[str, Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase ) __a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify area __a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) ) # verify boxes __a : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase ) __a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) ) # verify image_id __a : Union[str, Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) ) # verify is_crowd __a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) ) # verify class_labels __a : Any = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) ) # verify orig_size __a : Any = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) ) # verify size __a : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: __a : Tuple = json.loads(f.read() ) __a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} __a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" ) __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __a : List[str] = torch.Size([1, 3, 800, 1066] 
) self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase ) __a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify area __a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) ) # verify boxes __a : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase ) __a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) ) # verify image_id __a : List[str] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) ) # verify is_crowd __a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) ) # verify class_labels __a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) ) # verify masks __a : Union[str, Any] = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase ) # verify orig_size __a : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) ) # verify size __a : List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
697
0
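For reference, the detection annotations fed to the processor in the test above follow COCO's structure; this skeleton sketches the expected keys (all values are illustrative, not taken from the fixture file).

# Hypothetical minimal COCO-style detection target for one image.
target = {
    "image_id": 39769,
    "annotations": [
        {
            "id": 1,
            "image_id": 39769,
            "category_id": 17,                   # e.g. "cat" in the COCO label set
            "bbox": [13.0, 52.0, 301.0, 418.0],  # [x, y, width, height] in pixels
            "area": 125818.0,
            "iscrowd": 0,
        }
    ],
}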
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : List[str] = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : int = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : str = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : int = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Any = [ """FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, 
TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
721
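A toy illustration of the lazy-module pattern used in the `__init__` above: attribute access, not import time, pulls in the heavy submodule. This is a simplified stand-in sketch, not transformers' actual `_LazyModule`.

import importlib
import types


class ToyLazyModule(types.ModuleType):
    """Resolves attributes to submodule members on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Find which submodule exports the requested name, import it lazily,
        # then forward the attribute lookup to it.
        for submodule, exported_names in self._import_structure.items():
            if attr in exported_names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")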
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING) __SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() if args.tokenizer_name: __SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size __SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine' if args.fpaa: __SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine' if args.inta: __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)] __SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __SCREAMING_SNAKE_CASE : Tuple = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: __a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) __a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) __a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase ) # start time __a : Optional[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowercase ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase ) cuda.memcpy_dtoh_async(lowercase , 
lowercase , lowercase ) # Synchronize the stream and take time stream.synchronize() # end time __a : str = time.time() __a : Any = end_time - start_time __a : Optional[int] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names __SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0] __SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1] __SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length) def _snake_case ( lowercase ) -> Tuple: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace __a : Optional[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. 
This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. __a : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. __a : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. __a : Optional[Any] = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). __a : Dict = tokenized_examples.sequence_ids(lowercase ) __a : Optional[Any] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. __a : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. __a : int = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'] # Validation Feature Creation __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __SCREAMING_SNAKE_CASE : List[Any] = default_data_collator __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __SCREAMING_SNAKE_CASE : List[str] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def _snake_case ( lowercase , lowercase , lowercase , lowercase="eval" ) -> Any: # Post-processing: we match the start logits and end logits to answers in the original context. __a : List[str] = postprocess_qa_predictions( examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: __a : List[str] = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: __a : List[str] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] __a : Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowercase , label_ids=lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def _snake_case ( lowercase ) -> Optional[int]: return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize # Allocate device memory for inputs and outputs. __SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __SCREAMING_SNAKE_CASE : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : str = cuda.mem_alloc(h_outputa.nbytes) __SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __SCREAMING_SNAKE_CASE : Tuple = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') __SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0 __SCREAMING_SNAKE_CASE : str = 0 __SCREAMING_SNAKE_CASE : str = timeit.default_timer() __SCREAMING_SNAKE_CASE : Dict = None for step, batch in enumerate(eval_dataloader): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = outputs __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(start_logits) __SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __SCREAMING_SNAKE_CASE : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __SCREAMING_SNAKE_CASE : Tuple = nested_truncate(all_preds, len(eval_dataset)) __SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} 
ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) __SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds) __SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
697
0
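The inference helper above follows the standard pycuda async round trip. This stripped-down sketch shows just the buffer discipline with no TensorRT engine involved; it assumes a CUDA-capable machine with pycuda installed.

import numpy as np
import pycuda.autoinit  # noqa: F401  (creates a CUDA context on import)
import pycuda.driver as cuda

h_input = cuda.pagelocked_empty((4,), dtype=np.float32)  # pinned host buffer
h_input[:] = [1, 2, 3, 4]
d_input = cuda.mem_alloc(h_input.nbytes)                 # device buffer
h_output = cuda.pagelocked_empty((4,), dtype=np.float32)

stream = cuda.Stream()
cuda.memcpy_htod_async(d_input, h_input, stream)         # host -> device
# ... a kernel launch or TRT context.execute_async(...) would run here ...
cuda.memcpy_dtoh_async(h_output, d_input, stream)        # device -> host
stream.synchronize()
print(h_output)  # [1. 2. 3. 4.] -- copied back unchanged since no kernel ran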
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
700
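A quick usage sketch of the two activations above; the input values are illustrative and the comments show the (unrounded) NumPy output.

import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU / swish: x * sigmoid(x)
    return vector * sigmoid(vector)


x = np.array([-2.0, 0.0, 2.0])
print(sigmoid(x))              # [0.11920292 0.5        0.88079708]
print(sigmoid_linear_unit(x))  # [-0.23840584  0.          1.76159416]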
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation using the stochastic sampler of Karras et al. (2022)."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
697
0
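For intuition, here is a self-contained sketch of the Karras et al. (2022) noise schedule that the scheduler above discretizes; the sigma_min, sigma_max, and rho defaults below are illustrative, not the pipeline's configured values.

import numpy as np


def karras_sigmas(num_steps: int, sigma_min: float = 0.02, sigma_max: float = 80.0, rho: float = 7.0) -> np.ndarray:
    # Interpolate in sigma^(1/rho) space, then raise back to the rho-th power
    # (eq. (5) in Karras et al., 2022).
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho


print(karras_sigmas(5))  # monotonically decreasing noise levels from sigma_max down to sigma_min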
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
701
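A minimal, dependency-free sketch of the hook-forwarding pattern the decorator above implements; `_Hook` and `Encoder` are hypothetical stand-ins for accelerate's hook object and a diffusers module.

def apply_forward_hook(method):
    def wrapper(self, *args, **kwargs):
        # Run the pre-forward hook (if the object carries one) before dispatching.
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper


class _Hook:
    def pre_forward(self, module):
        print("pre_forward called")


class Encoder:
    _hf_hook = _Hook()

    @apply_forward_hook
    def encode(self, x):
        return x * 2


print(Encoder().encode(3))  # prints "pre_forward called", then 6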
def check_bouncy(n: int) -> bool:
    """Return True if n is bouncy: its digits are neither monotonically increasing nor decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers first reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
697
0
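A few spot checks of the bouncy-number predicate; the one-liner below inlines the same sorted-digits test so the snippet runs standalone.

def check_bouncy(n: int) -> bool:
    s = str(n)
    return "".join(sorted(s)) != s and "".join(sorted(s, reverse=True)) != s


print(check_bouncy(134468))  # False: digits never decrease ("increasing" number)
print(check_bouncy(66420))   # False: digits never increase ("decreasing" number)
print(check_bouncy(155349))  # True: goes up, down, then up again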
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
702
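The same benchmark can also be driven programmatically rather than via the CLI; the model name, batch size, and sequence length below are illustrative.

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
)
benchmark = TensorFlowBenchmark(args=benchmark_args)
results = benchmark.run()
print(results)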
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
697
0
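Once converted, the dump folder holds the weights file and `config.json`, so it loads like any local checkpoint; the folder path below is illustrative.

from transformers import GPT2Model

model = GPT2Model.from_pretrained("./gpt2-pytorch-dump")  # hypothetical output folder
print(model.config.n_layer, model.config.n_embd)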
import os


def solution():
    """Find the maximum total from top to bottom of the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
703
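The same maximum-path recurrence on a small inline triangle, so the accumulation logic can be checked without triangle.txt.

triangle = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]

for i in range(1, len(triangle)):
    for j in range(len(triangle[i])):
        # Each cell absorbs the larger of its two parents from the row above.
        above_left = triangle[i - 1][j - 1] if j > 0 else 0
        above_right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
        triangle[i][j] += max(above_left, above_right)

print(max(triangle[-1]))  # 23, via the path 3 -> 7 -> 4 -> 9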
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE__ : @staticmethod def __lowerCamelCase ( *__UpperCamelCase , **__UpperCamelCase ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): lowercase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = ObjectDetectionPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[str] = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 ) self.assertGreater(len(__UpperCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( __UpperCamelCase , { """score""": ANY(__UpperCamelCase ), """label""": ANY(__UpperCamelCase ), """box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )}, } , ) import datasets __a : Optional[int] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) __a : Tuple = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] __a : Any = object_detector(__UpperCamelCase , threshold=0.0 ) self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) ) for outputs in batch_outputs: self.assertGreater(len(__UpperCamelCase ) , 0 ) for detected_object in outputs: self.assertEqual( __UpperCamelCase , { """score""": ANY(__UpperCamelCase ), """label""": ANY(__UpperCamelCase ), """box""": {"""xmin""": ANY(__UpperCamelCase ), """ymin""": ANY(__UpperCamelCase ), """xmax""": ANY(__UpperCamelCase ), """ymax""": ANY(__UpperCamelCase )}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @require_torch def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = """hf-internal-testing/tiny-detr-mobilenetsv3""" __a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase ) __a : Optional[Any] = AutoFeatureExtractor.from_pretrained(__UpperCamelCase ) __a : str = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase ) __a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ] , ) __a : Union[str, Any] = object_detector( [ 
"""http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], [ {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, {"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}}, ], ] , ) @require_torch @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : str = """facebook/detr-resnet-50""" __a : Dict = AutoModelForObjectDetection.from_pretrained(__UpperCamelCase ) __a : int = AutoFeatureExtractor.from_pretrained(__UpperCamelCase ) __a : int = ObjectDetectionPipeline(model=__UpperCamelCase , feature_extractor=__UpperCamelCase ) __a : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) __a : Optional[Any] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def __lowerCamelCase 
( self ): '''simple docstring''' __a : int = """facebook/detr-resnet-50""" __a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase ) __a : Optional[int] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) __a : List[str] = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], [ {"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}}, {"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}}, {"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}}, {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ], ] , ) @require_torch @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = 0.9_9_8_5 __a : Union[str, Any] = """facebook/detr-resnet-50""" __a : Optional[int] = pipeline("""object-detection""" , model=__UpperCamelCase ) __a : Union[str, Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=__UpperCamelCase ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}}, {"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}}, ] , ) @require_torch @require_pytesseract @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : str = """Narsil/layoutlmv3-finetuned-funsd""" __a : List[Any] = 0.9_9_9_3 __a : Dict = pipeline("""object-detection""" , model=__UpperCamelCase , threshold=__UpperCamelCase ) __a : List[str] = 
object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [ {"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, {"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}}, ] , )
697
0
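Outside the test harness, the same checkpoint is exercised in a few lines via the pipeline API; the image URL matches the fixture used in the tests above.

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
results = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
for r in results:
    print(f"{r['label']}: {r['score']:.3f} at {r['box']}")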
'''simple docstring''' from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase , ): '''simple docstring''' __a : Tuple = parent __a : List[str] = 13 __a : Tuple = 7 __a : List[Any] = 30 __a : Optional[int] = self.seq_length + self.mem_len __a : List[str] = 15 __a : Union[str, Any] = True __a : Dict = True __a : List[Any] = 99 __a : List[Any] = [10, 50, 80] __a : int = 32 __a : Tuple = 32 __a : List[Any] = 4 __a : Any = 8 __a : str = 128 __a : Any = 2 __a : List[str] = 2 __a : Dict = None __a : List[Any] = 1 __a : List[str] = 0 __a : str = 3 __a : Union[str, Any] = self.vocab_size - 1 __a : str = 0.0_1 def __lowerCamelCase ( self ): '''simple docstring''' __a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : Dict = None if self.use_labels: __a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : str = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def __lowerCamelCase ( self ): '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[Any] = TFTransfoXLModel(lowercase_ ) __a : Union[str, Any] = model(lowercase_ ).to_tuple() __a : Optional[int] = {"input_ids": input_ids_a, "mems": mems_a} __a : Dict = model(lowercase_ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : str = TFTransfoXLLMHeadModel(lowercase_ ) __a : int = model(lowercase_ ).to_tuple() __a : List[str] = {"input_ids": input_ids_a, "labels": lm_labels} __a : List[Any] = model(lowercase_ ).to_tuple() __a : str = model([input_ids_a, mems_a] ).to_tuple() __a : List[Any] = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels} __a : Dict = model(lowercase_ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) 
self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Tuple = TFTransfoXLForSequenceClassification(lowercase_ ) __a : Any = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.prepare_config_and_inputs() (__a) : Union[str, Any] = config_and_inputs __a : int = {"input_ids": input_ids_a} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE__ ( snake_case__ , snake_case__ , unittest.TestCase ): lowercase__ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowercase__ = () if is_tf_available() else () lowercase__ = ( { "feature-extraction": TFTransfoXLModel, "text-classification": TFTransfoXLForSequenceClassification, "text-generation": TFTransfoXLLMHeadModel, "zero-shot": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def __lowerCamelCase ( self ): '''simple docstring''' __a : int = TFTransfoXLModelTester(self ) __a : Any = ConfigTester(self , config_class=lowercase_ , d_embed=37 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def __lowerCamelCase ( self ): '''simple docstring''' self.model_tester.set_seed() __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*lowercase_ ) def __lowerCamelCase ( self ): '''simple docstring''' self.model_tester.set_seed() __a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*lowercase_ ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowercase_ ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __a : str = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: __a : Dict = model_class(lowercase_ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: __a : Union[str, Any] = model.get_output_embeddings() assert isinstance(lowercase_ , tf.keras.layers.Layer ) __a : Union[str, Any] = model.get_bias() assert name is None else: __a : Dict = model.get_output_embeddings() assert x is None __a : str = model.get_bias() assert name is None def __lowerCamelCase ( self ): '''simple docstring''' pass @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : int = TFTransfoXLModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @require_tf class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off __a : Union[str, Any] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . 
Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off __a : Union[str, Any] = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> __a : Dict = model.generate(lowercase_ , max_length=200 , do_sample=lowercase_ ) self.assertListEqual(output_ids[0].numpy().tolist() , lowercase_ )
704
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : List[str] = { 'configuration_blenderbot_small': [ 'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotSmallConfig', 'BlenderbotSmallOnnxConfig', ], 'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Union[str, Any] = ['BlenderbotSmallTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : List[str] = [ 'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[int] = [ 'TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[Any] = [ 'FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
697
0
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
    )
    parser.add_argument(
        '--reset_position_index_per_cell',
        default=False,
        action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to True.',
    )
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--tapas_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained TAPAS model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
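# A minimal sketch of calling the converter from Python rather than the CLI;
# all three paths below are placeholders, and the checkpoint directory is
# assumed to hold the matching vocab.txt next to model.ckpt (the script derives
# the vocab path by stripping 'model.ckpt' from tf_checkpoint_path).
#
# convert_tf_checkpoint_to_pytorch(
#     task='WTQ',
#     reset_position_index_per_cell=True,
#     tf_checkpoint_path='tapas_wtq/model.ckpt',       # hypothetical checkpoint
#     tapas_config_file='tapas_wtq/bert_config.json',  # hypothetical config
#     pytorch_dump_path='tapas_wtq_pytorch',           # hypothetical output dir
# )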
705
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f"Splitting {sum(idxs)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)      # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
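# A minimal usage sketch for LmSeqsDataset, assuming only the attribute names
# the class actually reads (mlm, max_model_input_size, is_master,
# special_tok_ids); in the distillation scripts this `params` object comes from
# argparse, so the SimpleNamespace below is purely illustrative.
from types import SimpleNamespace

from torch.utils.data import DataLoader

params = SimpleNamespace(
    mlm=True,
    max_model_input_size=512,
    is_master=True,
    special_tok_ids={'cls_token': 101, 'sep_token': 102, 'pad_token': 0, 'unk_token': 100},
)
toy_data = [np.array([101] + [5] * 20 + [102])]  # one pre-tokenized sequence: [CLS] ... [SEP]
dataset = LmSeqsDataset(params, toy_data)
loader = DataLoader(dataset, batch_size=1, collate_fn=dataset.batch_sequences)
token_ids, lengths = next(iter(loader))  # padded (bs, max_seq_len) tensor plus lengths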
697
0
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __SCREAMING_SNAKE_CASE : List[Any] = pytest.mark.integration @require_faiss class SCREAMING_SNAKE_CASE__ ( __lowercase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Dataset.from_dict({"""filename""": ["""my_name-train""" + """_""" + str(__UpperCamelCase ) for x in np.arange(30 ).tolist()]} ) return dset def __lowerCamelCase ( self ): '''simple docstring''' import faiss __a : Dataset = self._create_dummy_dataset() __a : int = dset.map( lambda __UpperCamelCase , __UpperCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__UpperCamelCase , keep_in_memory=__UpperCamelCase ) __a : str = dset.add_faiss_index("""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __a : List[Any] = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" ) dset.drop_index("""vecs""" ) def __lowerCamelCase ( self ): '''simple docstring''' import faiss __a : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __a : List[str] = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" ) def __lowerCamelCase ( self ): '''simple docstring''' import faiss __a : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__UpperCamelCase ) as tmp_file: dset.save_faiss_index("""vecs""" , tmp_file.name ) dset.load_faiss_index("""vecs2""" , tmp_file.name ) os.unlink(tmp_file.name ) __a : str = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" ) dset.drop_index("""vecs""" ) self.assertRaises(__UpperCamelCase , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa ) ) ) def __lowerCamelCase ( self ): '''simple docstring''' from elasticsearch import Elasticsearch __a : Dataset = self._create_dummy_dataset() with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch( """elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk: __a : List[Any] = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) __a : Dict = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} __a : List[Any] = Elasticsearch() dset.add_elasticsearch_index("""filename""" , es_client=__UpperCamelCase ) __a : Dict = dset.get_nearest_examples("""filename""" , """my_name-train_29""" ) self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" ) @require_faiss class SCREAMING_SNAKE_CASE__ ( __lowercase ): def __lowerCamelCase ( self ): '''simple docstring''' import faiss __a : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __a : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) __a : Union[str, Any] = 1 __a : List[str] = index.search(__UpperCamelCase ) self.assertRaises(__UpperCamelCase , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __a : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1] __a : Optional[int] = index.search_batch(__UpperCamelCase ) self.assertRaises(__UpperCamelCase , index.search_batch , queries[0] ) __a : int = [scores[0] for scores in total_scores] __a : str = [indices[0] for indices in total_indices] self.assertGreater(np.min(__UpperCamelCase ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' import faiss __a : Optional[Any] = FaissIndex(string_factory="""Flat""" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __a : str = FaissIndex(string_factory="""LSH""" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__UpperCamelCase ): __a : Union[str, Any] = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) ) def __lowerCamelCase ( self ): '''simple docstring''' import faiss __a : Optional[int] = faiss.IndexFlat(5 ) __a : List[Any] = FaissIndex(custom_index=__UpperCamelCase ) 
index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowerCamelCase ( self ): '''simple docstring''' import faiss __a : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__UpperCamelCase ) as tmp_file: index.save(tmp_file.name ) __a : int = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __a : Any = np.zeros(5 , dtype=np.floataa ) __a : List[str] = 1 __a : Optional[Any] = index.search(__UpperCamelCase ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def _snake_case ( lowercase ) -> Any: import faiss __a : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __a : str = "index.faiss" __a : Dict = F"""mock://{index_name}""" index.save(_lowerCamelCase , storage_options=mockfs.storage_options ) __a : int = FaissIndex.load(_lowerCamelCase , storage_options=mockfs.storage_options ) __a : List[Any] = np.zeros(5 , dtype=np.floataa ) __a : Optional[int] = 1 __a : List[str] = index.search(_lowerCamelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class SCREAMING_SNAKE_CASE__ ( __lowercase ): def __lowerCamelCase ( self ): '''simple docstring''' from elasticsearch import Elasticsearch with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch( """elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk: __a : Tuple = Elasticsearch() __a : Any = {"acknowledged": True} __a : str = ElasticSearchIndex(es_client=__UpperCamelCase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["""foo""", """bar""", """foobar"""] ) # single query __a : List[Any] = "foo" __a : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} __a : Union[str, Any] = index.search(__UpperCamelCase ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __a : Any = "foo" __a : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} __a : int = index.search(__UpperCamelCase , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __a : List[Any] = ["foo", "bar", "foobar"] __a : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} __a : int = index.search_batch(__UpperCamelCase ) __a : Dict = [scores[0] for scores in total_scores] __a : Any = [indices[0] for indices in total_indices] self.assertGreater(np.min(__UpperCamelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __UpperCamelCase ) # batched queries with timeout __a : Optional[int] = ["foo", "bar", "foobar"] __a : Dict = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} __a : List[Any] = index.search_batch(__UpperCamelCase , request_timeout=30 ) __a : List[str] = [scores[0] for scores in total_scores] __a : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(__UpperCamelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __UpperCamelCase )
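# A compact sketch of the FaissIndex API these tests exercise, runnable outside
# the unittest harness; it assumes a local faiss (e.g. faiss-cpu) installation.
import faiss
import numpy as np
from datasets.search import FaissIndex

index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))
scores, indices = index.search(np.ones(5, dtype=np.float32))
print(scores[0], indices[0])  # best inner-product match first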
706
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
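# A hedged sketch of driving the legacy filesystem above; the dataset id is a
# placeholder, and obtaining the DatasetInfo via huggingface_hub's
# HfApi.dataset_info endpoint is an assumption of this example.
from huggingface_hub import HfApi

repo_info = HfApi().dataset_info('squad')  # hypothetical repository id
fs = HfFileSystem(repo_info=repo_info)
print(fs.ls(''))  # top-level entries in the repository
with fs.open('README.md') as f:  # assumes the repo actually ships a README.md
    print(f.read()[:80])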
697
0
'''simple docstring'''
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    total = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f'{solution() = }')
707
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase , __UpperCamelCase=2 , __UpperCamelCase=32 , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=32 , __UpperCamelCase=4 , __UpperCamelCase=[0, 1, 2, 3] , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0_2 , __UpperCamelCase=3 , __UpperCamelCase=[1, 384, 24, 24] , __UpperCamelCase=True , __UpperCamelCase=None , ): '''simple docstring''' __a : List[str] = parent __a : Tuple = batch_size __a : str = image_size __a : int = patch_size __a : Dict = num_channels __a : int = is_training __a : Dict = use_labels __a : Union[str, Any] = hidden_size __a : Dict = num_hidden_layers __a : Dict = backbone_out_indices __a : Optional[int] = num_attention_heads __a : List[str] = intermediate_size __a : Optional[Any] = hidden_act __a : Dict = hidden_dropout_prob __a : Tuple = attention_probs_dropout_prob __a : Any = initializer_range __a : Any = num_labels __a : Optional[Any] = backbone_featmap_shape __a : List[Any] = scope __a : List[str] = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) __a : Union[str, Any] = (image_size // patch_size) ** 2 __a : List[str] = num_patches + 1 def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __a : Union[str, Any] = None if self.use_labels: __a : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __a : Tuple = self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [96, 192, 384, 768], """num_groups""": 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , ) def __lowerCamelCase ( self , 
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Optional[Any] = DPTModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : List[str] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : List[str] = self.num_labels __a : Union[str, Any] = DPTForDepthEstimation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : Tuple = model(__UpperCamelCase ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Dict = self.num_labels __a : Tuple = DPTForSemanticSegmentation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() __a : str = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.prepare_config_and_inputs() __a , __a , __a : Tuple = config_and_inputs __a : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): lowercase__ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () lowercase__ = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = DPTModelTester(self ) __a : List[Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""DPT does not use inputs_embeds""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : str = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __a : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __a : Any = model_class(__UpperCamelCase ) __a : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __a : int = [*signature.parameters.keys()] __a : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase ) def 
__lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common() __a : List[Any] = True if model_class in get_values(__UpperCamelCase ): continue __a : str = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.train() __a : Union[str, Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) __a : List[Any] = model(**__UpperCamelCase ).loss loss.backward() def __lowerCamelCase ( self ): '''simple docstring''' for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __a : Any = False __a : Dict = True if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing: continue __a : Any = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.gradient_checkpointing_enable() model.train() __a : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) __a : Dict = model(**__UpperCamelCase ).loss loss.backward() def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common() __a : Any = _config_zero_init(__UpperCamelCase ) for model_class in self.all_model_classes: __a : Any = model_class(config=__UpperCamelCase ) # Skip the check for the backbone __a : Optional[Any] = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": __a : Optional[int] = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def __lowerCamelCase ( self ): '''simple docstring''' pass @slow def __lowerCamelCase ( self ): '''simple docstring''' for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: __a : int = DPTModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a , __a : int = self.model_tester.prepare_config_and_inputs_for_common() __a : Optional[int] = """add""" with self.assertRaises(__UpperCamelCase ): __a : int = DPTForDepthEstimation(__UpperCamelCase ) def _snake_case ( ) -> Any: __a : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : int = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" ) __a : int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase ) __a : Union[str, Any] = prepare_img() __a : Any = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): __a : Optional[Any] = 
model(**__UpperCamelCase ) __a : int = outputs.predicted_depth # verify the predicted depth __a : Any = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , __UpperCamelCase ) __a : int = torch.tensor( [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCamelCase , atol=1E-4 ) )
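# The integration test above can be reproduced interactively with the
# depth-estimation pipeline; the checkpoint name matches the one the test
# loads, everything else is stock pipeline usage.
from transformers import pipeline

depth_estimator = pipeline('depth-estimation', model='Intel/dpt-hybrid-midas')
outputs = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png')
print(outputs['predicted_depth'].shape)  # e.g. torch.Size([1, 384, 384])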
697
0
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}


class EfficientNetConfig(PretrainedConfig):
    model_type = 'efficientnet'

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = 'swish',
        hidden_dim: int = 2560,
        pooling_type: str = 'mean',
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
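# A quick smoke-test sketch of the two classes above; the overridden values are
# arbitrary, not a recipe for any released EfficientNet checkpoint.
config = EfficientNetConfig(width_coefficient=1.0, depth_coefficient=1.0, image_size=224)
print(config.num_hidden_layers)  # derived as sum(num_block_repeats) * 4
onnx_config = EfficientNetOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict with the single 'pixel_values' input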
708
'''simple docstring''' import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) __a : Optional[int] = Vector() def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(__UpperCamelCase ) , """(0,0,0,0,0,1)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Vector([1, 2, 3, 4] ) self.assertEqual(len(__UpperCamelCase ) , 4 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = Vector([1, 2] ) __a : List[str] = Vector([1, 2, 3, 4, 5] ) __a : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) __a : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) __a : Union[str, Any] = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = Vector([1, 2, 3] ) __a : Any = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Vector([1, 2, 3] ) __a : Optional[Any] = Vector([2, -1, 4] ) # for test of dot product __a : Union[str, Any] = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" ) self.assertEqual((a * b) , 0 ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Vector([1, 2, 3] ) __a : Optional[int] = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , __UpperCamelCase , __UpperCamelCase ) ) , """(3,4,7)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : int = Vector([1, 0, 0, 0, 0, 0] ) __a : Any = x.copy() self.assertEqual(str(__UpperCamelCase ) , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(__UpperCamelCase ) , """(0,1,0)""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : List[Any] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(__UpperCamelCase , __UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): 
self.assertEqual(cofactors[x][y] , a.cofactor(__UpperCamelCase , __UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) __a : List[Any] = Vector([1, 2, 3] ) self.assertEqual("""(14,32,50)""" , str(a * x ) ) self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(__UpperCamelCase ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : Union[str, Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) __a : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual( """|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
697
0
'''simple docstring''' import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# __SCREAMING_SNAKE_CASE : List[str] = [ # (stable-diffusion, HF Diffusers) ('time_embed.0.weight', 'time_embedding.linear_1.weight'), ('time_embed.0.bias', 'time_embedding.linear_1.bias'), ('time_embed.2.weight', 'time_embedding.linear_2.weight'), ('time_embed.2.bias', 'time_embedding.linear_2.bias'), ('input_blocks.0.0.weight', 'conv_in.weight'), ('input_blocks.0.0.bias', 'conv_in.bias'), ('out.0.weight', 'conv_norm_out.weight'), ('out.0.bias', 'conv_norm_out.bias'), ('out.2.weight', 'conv_out.weight'), ('out.2.bias', 'conv_out.bias'), ] __SCREAMING_SNAKE_CASE : Optional[Any] = [ # (stable-diffusion, HF Diffusers) ('in_layers.0', 'norm1'), ('in_layers.2', 'conv1'), ('out_layers.0', 'norm2'), ('out_layers.3', 'conv2'), ('emb_layers.1', 'time_emb_proj'), ('skip_connection', 'conv_shortcut'), ] __SCREAMING_SNAKE_CASE : Optional[Any] = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks __SCREAMING_SNAKE_CASE : str = f'''down_blocks.{i}.resnets.{j}.''' __SCREAMING_SNAKE_CASE : int = f'''input_blocks.{3*i + j + 1}.0.''' unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 __SCREAMING_SNAKE_CASE : str = f'''down_blocks.{i}.attentions.{j}.''' __SCREAMING_SNAKE_CASE : Dict = f'''input_blocks.{3*i + j + 1}.1.''' unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks __SCREAMING_SNAKE_CASE : int = f'''up_blocks.{i}.resnets.{j}.''' __SCREAMING_SNAKE_CASE : Tuple = f'''output_blocks.{3*i + j}.0.''' unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 __SCREAMING_SNAKE_CASE : List[Any] = f'''up_blocks.{i}.attentions.{j}.''' __SCREAMING_SNAKE_CASE : Union[str, Any] = f'''output_blocks.{3*i + j}.1.''' unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 __SCREAMING_SNAKE_CASE : Any = f'''down_blocks.{i}.downsamplers.0.conv.''' __SCREAMING_SNAKE_CASE : Tuple = f'''input_blocks.{3*(i+1)}.0.op.''' unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 __SCREAMING_SNAKE_CASE : List[Any] = f'''up_blocks.{i}.upsamplers.0.''' __SCREAMING_SNAKE_CASE : List[Any] = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.''' unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) __SCREAMING_SNAKE_CASE : Union[str, Any] = 'mid_block.attentions.0.' __SCREAMING_SNAKE_CASE : int = 'middle_block.1.' 
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): __SCREAMING_SNAKE_CASE : Tuple = f'''mid_block.resnets.{j}.''' __SCREAMING_SNAKE_CASE : Optional[int] = f'''middle_block.{2*j}.''' unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def _snake_case ( lowercase ) -> str: __a : Union[str, Any] = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: __a : Union[str, Any] = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: __a : int = v.replace(lowerCAmelCase__ , lowerCAmelCase__ ) __a : List[str] = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: __a : str = v.replace(lowerCAmelCase__ , lowerCAmelCase__ ) __a : Tuple = v __a : str = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# __SCREAMING_SNAKE_CASE : Optional[int] = [ # (stable-diffusion, HF Diffusers) ('nin_shortcut', 'conv_shortcut'), ('norm_out', 'conv_norm_out'), ('mid.attn_1.', 'mid_block.attentions.0.'), ] for i in range(4): # down_blocks have two resnets for j in range(2): __SCREAMING_SNAKE_CASE : Dict = f'''encoder.down_blocks.{i}.resnets.{j}.''' __SCREAMING_SNAKE_CASE : int = f'''encoder.down.{i}.block.{j}.''' vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: __SCREAMING_SNAKE_CASE : Any = f'''down_blocks.{i}.downsamplers.0.''' __SCREAMING_SNAKE_CASE : Optional[int] = f'''down.{i}.downsample.''' vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) __SCREAMING_SNAKE_CASE : int = f'''up_blocks.{i}.upsamplers.0.''' __SCREAMING_SNAKE_CASE : Optional[Any] = f'''up.{3-i}.upsample.''' vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): __SCREAMING_SNAKE_CASE : Tuple = f'''decoder.up_blocks.{i}.resnets.{j}.''' __SCREAMING_SNAKE_CASE : Union[str, Any] = f'''decoder.up.{3-i}.block.{j}.''' vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): __SCREAMING_SNAKE_CASE : List[Any] = f'''mid_block.resnets.{i}.''' __SCREAMING_SNAKE_CASE : str = f'''mid.block_{i+1}.''' vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) __SCREAMING_SNAKE_CASE : str = [ # (stable-diffusion, HF Diffusers) ('norm.', 'group_norm.'), ('q.', 'query.'), ('k.', 'key.'), ('v.', 'value.'), ('proj_out.', 'proj_attn.'), ] def _snake_case ( lowercase ) -> int: return w.reshape(*w.shape , 1 , 1 ) def _snake_case ( lowercase ) -> List[Any]: __a : Dict = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: __a : Any = v.replace(lowerCAmelCase__ , lowerCAmelCase__ ) __a : int = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: __a : Dict = v.replace(lowerCAmelCase__ , lowerCAmelCase__ ) __a : Tuple = v __a : Dict = {v: vae_state_dict[k] for k, v in mapping.items()} __a : int = ['q', 'k', 'v', 'proj_out'] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F"""mid.attn_1.{weight_name}.weight""" in k: print(F"""Reshaping {k} for SD format""" ) __a : Union[str, Any] = reshape_weight_for_sd(lowerCAmelCase__ ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# 
__SCREAMING_SNAKE_CASE : List[Any] = [ # (stable-diffusion, HF Diffusers) ('resblocks.', 'text_model.encoder.layers.'), ('ln_1', 'layer_norm1'), ('ln_2', 'layer_norm2'), ('.c_fc.', '.fc1.'), ('.c_proj.', '.fc2.'), ('.attn', '.self_attn'), ('ln_final.', 'transformer.text_model.final_layer_norm.'), ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'), ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'), ] __SCREAMING_SNAKE_CASE : str = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} __SCREAMING_SNAKE_CASE : Tuple = re.compile('|'.join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp __SCREAMING_SNAKE_CASE : str = {'q': 0, 'k': 1, 'v': 2} def _snake_case ( lowercase ) -> Dict: __a : Optional[Any] = {} __a : List[str] = {} __a : Any = {} for k, v in text_enc_dict.items(): if ( k.endswith(""".self_attn.q_proj.weight""" ) or k.endswith(""".self_attn.k_proj.weight""" ) or k.endswith(""".self_attn.v_proj.weight""" ) ): __a : int = k[: -len(""".q_proj.weight""" )] __a : List[str] = k[-len("""q_proj.weight""" )] if k_pre not in capture_qkv_weight: __a : Optional[Any] = [None, None, None] __a : int = v continue if ( k.endswith(""".self_attn.q_proj.bias""" ) or k.endswith(""".self_attn.k_proj.bias""" ) or k.endswith(""".self_attn.v_proj.bias""" ) ): __a : Optional[Any] = k[: -len(""".q_proj.bias""" )] __a : str = k[-len("""q_proj.bias""" )] if k_pre not in capture_qkv_bias: __a : Tuple = [None, None, None] __a : Any = v continue __a : Tuple = textenc_pattern.sub(lambda lowercase : protected[re.escape(m.group(0 ) )] , lowerCAmelCase__ ) __a : Any = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) __a : Dict = textenc_pattern.sub(lambda lowercase : protected[re.escape(m.group(0 ) )] , lowerCAmelCase__ ) __a : Union[str, Any] = torch.cat(lowerCAmelCase__ ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" ) __a : Optional[int] = textenc_pattern.sub(lambda lowercase : protected[re.escape(m.group(0 ) )] , lowerCAmelCase__ ) __a : Optional[Any] = torch.cat(lowerCAmelCase__ ) return new_state_dict def _snake_case ( lowercase ) -> Optional[Any]: return text_enc_dict if __name__ == "__main__": __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.' ) __SCREAMING_SNAKE_CASE : str = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" 
# Path for safetensors __SCREAMING_SNAKE_CASE : Any = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors') __SCREAMING_SNAKE_CASE : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors') __SCREAMING_SNAKE_CASE : str = osp.join(args.model_path, 'text_encoder', 'model.safetensors') # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): __SCREAMING_SNAKE_CASE : str = load_file(unet_path, device='cpu') else: __SCREAMING_SNAKE_CASE : List[Any] = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin') __SCREAMING_SNAKE_CASE : Any = torch.load(unet_path, map_location='cpu') if osp.exists(vae_path): __SCREAMING_SNAKE_CASE : int = load_file(vae_path, device='cpu') else: __SCREAMING_SNAKE_CASE : Optional[Any] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin') __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(vae_path, map_location='cpu') if osp.exists(text_enc_path): __SCREAMING_SNAKE_CASE : Tuple = load_file(text_enc_path, device='cpu') else: __SCREAMING_SNAKE_CASE : Optional[int] = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin') __SCREAMING_SNAKE_CASE : str = torch.load(text_enc_path, map_location='cpu') # Convert the UNet model __SCREAMING_SNAKE_CASE : Optional[int] = convert_unet_state_dict(unet_state_dict) __SCREAMING_SNAKE_CASE : str = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()} # Convert the VAE model __SCREAMING_SNAKE_CASE : Union[str, Any] = convert_vae_state_dict(vae_state_dict) __SCREAMING_SNAKE_CASE : Dict = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper __SCREAMING_SNAKE_CASE : Union[str, Any] = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm __SCREAMING_SNAKE_CASE : str = {'transformer.' + k: v for k, v in text_enc_dict.items()} __SCREAMING_SNAKE_CASE : int = convert_text_enc_state_dict_vaa(text_enc_dict) __SCREAMING_SNAKE_CASE : List[str] = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()} else: __SCREAMING_SNAKE_CASE : Tuple = convert_text_enc_state_dict(text_enc_dict) __SCREAMING_SNAKE_CASE : Tuple = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint __SCREAMING_SNAKE_CASE : int = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: __SCREAMING_SNAKE_CASE : List[str] = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: __SCREAMING_SNAKE_CASE : List[Any] = {'state_dict': state_dict} torch.save(state_dict, args.checkpoint_path)
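# A hedged usage sketch for the conversion script above; the paths and the
# script filename are placeholders, not verified commands.
#
#   python convert_diffusers_to_sd.py \
#       --model_path ./my-diffusers-model \
#       --checkpoint_path ./sd_checkpoint.safetensors \
#       --half --use_safetensors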
709
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand __SCREAMING_SNAKE_CASE : List[str] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) __SCREAMING_SNAKE_CASE : Optional[Any] = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) __SCREAMING_SNAKE_CASE : Tuple = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) __SCREAMING_SNAKE_CASE : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) __SCREAMING_SNAKE_CASE : Optional[int] = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) __SCREAMING_SNAKE_CASE : int = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH 
JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) __SCREAMING_SNAKE_CASE : int = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _snake_case ( ) -> List[str]: __a , __a : List[Any] = randrange(len(lowercase ) ), randrange(len(lowercase ) ) __a : int = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] __a , __a : int = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _snake_case ( lowercase = 1_0_0 ) -> Any: return (generate_random_hand() for _ in range(lowercase )) @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> int: assert PokerHand(lowercase )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Any: assert PokerHand(lowercase )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , lowercase ) def _snake_case ( lowercase , lowercase , lowercase ) -> List[str]: __a : Union[str, Any] = PokerHand(lowercase ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Optional[int]: assert PokerHand(lowercase )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , lowercase ) def _snake_case ( lowercase , lowercase ) -> Union[str, Any]: assert PokerHand(lowercase )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , lowercase ) def _snake_case ( lowercase , lowercase , lowercase ) -> Optional[int]: assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def _snake_case ( lowercase , lowercase , lowercase ) -> int: assert PokerHand(lowercase ).compare_with(PokerHand(lowercase ) ) == expected def _snake_case ( ) -> Union[str, Any]: __a : Tuple = [PokerHand(lowercase ) for hand in SORTED_HANDS] __a : Optional[int] = poker_hands.copy() shuffle(lowercase ) __a : List[str] = chain(sorted(lowercase ) ) for index, hand in enumerate(lowercase ): assert hand == poker_hands[index] def _snake_case ( ) -> List[str]: # Test that five high straights are compared correctly. __a : Optional[int] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=lowercase ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _snake_case ( ) -> List[str]: # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project() -> None:
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
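# Illustrative use of the interface the tests above exercise; a minimal sketch
# grounded in the first fixture tuple (PokerHand comes from the sibling
# solution module imported at the top of this file).
hand, other = PokerHand("2H 3H 4H 5H 6H"), PokerHand("KS AS TS QS JS")
print(hand.compare_with(other))  # "Loss": the royal flush beats the straight flush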
697
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
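# A minimal sketch of the lazy-import pattern that `_LazyModule` implements in
# the file above. This simplified `LazyModule` is an illustration only, not the
# transformers implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    """Imports a submodule only when one of its exported names is first accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._symbol_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value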
710
'''simple docstring'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
697
0
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()

import os  # noqa: this is just for tests
import os as renamed_os  # noqa: this is just for tests
from os import path  # noqa: this is just for tests
from os import path as renamed_path  # noqa: this is just for tests
from os.path import join  # noqa: this is just for tests
from os.path import join as renamed_join  # noqa: this is just for tests


open = open  # noqa: we just need to have a builtin inside this module to test it properly
711
'''simple docstring'''
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
    if result is None:
        print(f'''{target} was not found in {collection}.''')
    else:
        print(f'''{target} was found at position {result} in {collection}.''')
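# Spot checks for the helpers above (illustrative; they assume the function
# names as fixed in this file).
assert bisect_left([0, 5, 7, 10, 15], 5) == 1
assert bisect_right([0, 5, 7, 10, 15], 5) == 2  # right bound: insertion point after equal items
assert binary_search([0, 5, 7, 10, 15], 6) is None
assert binary_search_by_recursion([0, 5, 7, 10, 15], 10, 0, 4) == 3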
697
0
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : str = mock.Mock() __a : Optional[Any] = 500 __a : Optional[Any] = {} __a : Any = HTTPError __a : Union[str, Any] = {} # Download this model to make sure it's in the cache. __a : str = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=lowerCAmelCase_ ) as mock_head: __a : Optional[int] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = mock.Mock() __a : Any = 500 __a : List[Any] = {} __a : Tuple = HTTPError __a : List[str] = {} # Download this model to make sure it's in the cache. __a : Tuple = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("""requests.Session.request""" , return_value=lowerCAmelCase_ ) as mock_head: __a : Optional[Any] = GPTaTokenizerFast.from_pretrained("""gpt2""" ) # This check we did call the fake head request mock_head.assert_called() def __lowerCamelCase ( self ): '''simple docstring''' try: __a : Optional[Any] = tempfile.mktemp() with open(lowerCAmelCase_ , """wb""" ) as f: http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , lowerCAmelCase_ ) __a : Union[str, Any] = AlbertTokenizer.from_pretrained(lowerCAmelCase_ ) finally: os.remove(lowerCAmelCase_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile("""tokenizer.json""" ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open("""tokenizer.json""" , """wb""" ) as f: http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , lowerCAmelCase_ ) __a : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove("""tokenizer.json""" ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" ) @is_staging_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): lowercase__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def __lowerCamelCase ( cls ): '''simple docstring''' __a : Optional[Any] = TOKEN HfFolder.save_token(lowerCAmelCase_ ) @classmethod def __lowerCamelCase ( cls ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id="""test-tokenizer""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" ) except HTTPError: pass def __lowerCamelCase ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __a : Union[str, Any] = os.path.join(lowerCAmelCase_ , """vocab.txt""" ) with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) __a : List[str] = BertTokenizer(lowerCAmelCase_ ) tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token ) __a : Optional[Any] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""test-tokenizer""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowerCAmelCase_ , repo_id="""test-tokenizer""" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token ) __a : Union[str, Any] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) def __lowerCamelCase ( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: __a : Optional[int] = os.path.join(lowerCAmelCase_ , """vocab.txt""" ) with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) __a : Optional[int] = BertTokenizer(lowerCAmelCase_ ) tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token ) __a : Union[str, Any] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( lowerCAmelCase_ , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token ) __a : str = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def __lowerCamelCase ( self ): '''simple docstring''' CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: __a : str = os.path.join(lowerCAmelCase_ , """vocab.txt""" ) with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) __a : Any = CustomTokenizer(lowerCAmelCase_ ) # No fast custom tokenizer tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) __a : List[str] = 
AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowerCAmelCase_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: __a : Optional[int] = os.path.join(lowerCAmelCase_ , """vocab.txt""" ) with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) __a : Dict = BertTokenizerFast.from_pretrained(lowerCAmelCase_ ) bert_tokenizer.save_pretrained(lowerCAmelCase_ ) __a : List[str] = CustomTokenizerFast.from_pretrained(lowerCAmelCase_ ) tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token ) __a : Optional[Any] = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowerCAmelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" ) __a : int = AutoTokenizer.from_pretrained( f"""{USER}/test-dynamic-tokenizer""" , use_fast=lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Trie() trie.add("""Hello 友達""" ) self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) trie.add("""Hello""" ) trie.data self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} ) def __lowerCamelCase ( self ): '''simple docstring''' __a : int = Trie() self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] ) trie.add("""[CLS]""" ) trie.add("""extra_id_1""" ) trie.add("""extra_id_100""" ) self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = Trie() trie.add("""A""" ) self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] ) self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[str] = Trie() trie.add("""TOKEN]""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = Trie() trie.add("""A""" ) trie.add("""P""" ) trie.add("""[SPECIAL_TOKEN]""" ) self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = Trie() trie.add("""AB""" ) trie.add("""B""" ) trie.add("""C""" ) self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] ) def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Trie() trie.add("""ABC""" ) trie.add("""B""" ) trie.add("""CD""" ) self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", 
"""D"""] ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = Trie() __a : Any = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] ) self.assertEqual(lowerCAmelCase_ , ["""AB""", """C"""] )
712
'''simple docstring'''
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    dice_range = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(dice_range, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f'''{solution() = }''')
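# Quick sanity checks on the helper above (names as fixed in this file).
freq = total_frequency_distribution(sides_number=6, dice_number=2)
assert len(freq) == 13    # totals 0..12; 0 and 1 are impossible with two dice
assert freq[7] == 6       # six ways to roll a 7 with two six-sided dice
assert sum(freq) == 6**2  # every outcome counted exactly once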
697
0
'''simple docstring''' from __future__ import annotations from collections.abc import Callable __SCREAMING_SNAKE_CASE : Union[str, Any] = list[list[float | int]] def _snake_case ( lowercase , lowercase ) -> Matrix: __a : int = len(a_ ) __a : Matrix = [[0 for _ in range(size + 1 )] for _ in range(a_ )] __a : int __a : int __a : int __a : int __a : int __a : float for row in range(a_ ): for col in range(a_ ): __a : Any = matrix[row][col] __a : Optional[Any] = vector[row][0] __a : int = 0 __a : int = 0 while row < size and col < size: # pivoting __a : int = max((abs(augmented[rowa][col] ), rowa) for rowa in range(a_ , a_ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __a : str = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , a_ ): __a : Tuple = augmented[rowa][col] / augmented[row][col] __a : Union[str, Any] = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , a_ ): for row in range(a_ ): __a : List[Any] = augmented[row][col] / augmented[col][col] for cola in range(a_ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 1_0 )] for row in range(a_ ) ] def _snake_case ( lowercase ) -> Callable[[int], int]: __a : int = len(a_ ) __a : Matrix = [[0 for _ in range(a_ )] for _ in range(a_ )] __a : Matrix = [[0] for _ in range(a_ )] __a : Matrix __a : int __a : int __a : int for x_val, y_val in enumerate(a_ ): for col in range(a_ ): __a : str = (x_val + 1) ** (size - col - 1) __a : Dict = y_val __a : Union[str, Any] = solve(a_ , a_ ) def interpolated_func(lowercase ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(a_ ) ) return interpolated_func def _snake_case ( lowercase ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**1_0 ) def _snake_case ( lowercase = question_function , lowercase = 1_0 ) -> int: __a : list[int] = [func(a_ ) for x_val in range(1 , order + 1 )] __a : list[Callable[[int], int]] = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __a : int = 0 __a : Callable[[int], int] __a : int for poly in polynomials: __a : Tuple = 1 while func(a_ ) == poly(a_ ): x_val += 1 ret += poly(a_ ) return ret if __name__ == "__main__": print(f'''{solution() = }''')
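# The file above is Project Euler 101: the generating function is
#   u(n) = 1 - n + n^2 - n^3 + ... + n^10  (i.e. the sum of (-n)^k for k = 0..10),
# and for each k = 1..10 a degree-(k-1) polynomial OP(k, n) is fitted through
# the first k terms via Gaussian elimination. The answer is the sum of each
# fit's first incorrect term (FIT), found by scanning n upward until
# OP(k, n) differs from u(n).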
713
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): '''simple docstring''' super().__init__() self.register_modules( vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , ) def __lowerCamelCase ( self , __UpperCamelCase = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __a : Union[str, Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' self.enable_attention_slicing(__UpperCamelCase ) @torch.no_grad() def __call__( self , __UpperCamelCase , __UpperCamelCase = 512 , __UpperCamelCase = 512 , __UpperCamelCase = 50 , __UpperCamelCase = 7.5 , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = 0.0 , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "pil" , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = 1 , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' if isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Union[str, Any] = 1 elif isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Tuple = len(__UpperCamelCase ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__UpperCamelCase )}.""" ) # get prompt text embeddings __a : Tuple = self.tokenizer( __UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) __a : Union[str, Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __a : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) __a : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: __a : int = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __a 
, __a , __a : Union[str, Any] = text_embeddings.shape __a : Optional[Any] = text_embeddings.repeat(1 , __UpperCamelCase , 1 ) __a : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __a : Any = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __a : List[str] if negative_prompt is None: __a : Optional[Any] = [""""""] elif type(__UpperCamelCase ) is not type(__UpperCamelCase ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !=""" f""" {type(__UpperCamelCase )}.""" ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): __a : Any = [negative_prompt] elif batch_size != len(__UpperCamelCase ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" """ the batch size of `prompt`.""" ) else: __a : Tuple = negative_prompt __a : Any = text_input_ids.shape[-1] __a : List[str] = self.tokenizer( __UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" , ) __a : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __a : List[str] = uncond_embeddings.shape[1] __a : List[Any] = uncond_embeddings.repeat(__UpperCamelCase , __UpperCamelCase , 1 ) __a : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __a : List[Any] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__a : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __a : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) __a : int = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __a : Any = torch.randn( __UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(self.device ) __a : Optional[Any] = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to( self.device ) else: __a : Optional[int] = torch.randn( __UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) __a : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase ) else: if latents_reference.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __a : Optional[Any] = latents_reference.to(self.device ) __a : str = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images __a : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2 __a : int = (latents_shape[2] - latents_shape_reference[2]) // 2 __a : int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx __a : Tuple = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy __a : Optional[Any] = 0 if dx < 0 else dx __a : Optional[Any] = 0 if dy < 0 else dy __a : Optional[int] = max(-dx , 0 ) __a : Optional[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() __a : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(__UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __a : Dict = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __a : Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __a : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __a : Optional[Any] = {} if accepts_eta: __a : Union[str, Any] = eta for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance __a : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __a : Tuple = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase ) # predict the noise residual __a : Union[str, Any] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample # perform guidance if do_classifier_free_guidance: __a , __a : List[str] = noise_pred.chunk(2 ) __a : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __a : List[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __a : Optional[Any] = 1 / 0.1_8_2_1_5 * latents __a : Optional[int] = self.vae.decode(__UpperCamelCase ).sample __a : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __a : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: __a : List[str] = self.feature_extractor(self.numpy_to_pil(__UpperCamelCase ) , return_tensors="""pt""" ).to( self.device ) __a , __a : int = self.safety_checker( images=__UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: __a : Optional[int] = None if output_type == "pil": __a : str = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
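# Hypothetical usage sketch for the pipeline above. The checkpoint id and the
# `custom_pipeline` registration name are assumptions for illustration only,
# not confirmed by this file.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",                # assumed checkpoint
    custom_pipeline="seed_resize_stable_diffusion",  # assumed community pipeline name
    torch_dtype=torch.float16,
).to("cuda")
image = pipe("an astronaut riding a horse", height=512, width=768).images[0]
image.save("astronaut.png")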
697
0
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __SCREAMING_SNAKE_CASE : Union[str, Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=18 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=None , ): '''simple docstring''' __a : List[str] = size if size is not None else {"""height""": 20, """width""": 20} __a : List[Any] = parent __a : Optional[int] = batch_size __a : Dict = num_channels __a : Union[str, Any] = image_size __a : List[Any] = min_resolution __a : str = max_resolution __a : int = size __a : Optional[int] = do_normalize __a : Any = do_convert_rgb __a : List[Any] = [512, 1024, 2048, 4096] __a : Union[str, Any] = patch_size if patch_size is not None else {"""height""": 16, """width""": 16} def __lowerCamelCase ( self ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __lowerCamelCase ( self ): '''simple docstring''' __a : Tuple = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg""" __a : Dict = Image.open(requests.get(_A , stream=_A ).raw ).convert("""RGB""" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." 
, ) @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( _a , unittest.TestCase ): lowercase__ = PixaStructImageProcessor if is_vision_available() else None def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = PixaStructImageProcessingTester(self ) @property def __lowerCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , """do_normalize""" ) ) self.assertTrue(hasattr(_A , """do_convert_rgb""" ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.image_processor_tester.prepare_dummy_image() __a : Tuple = self.image_processing_class(**self.image_processor_dict ) __a : Tuple = 2048 __a : Tuple = image_processor(_A , return_tensors="""pt""" , max_patches=_A ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1E-3 , rtol=1E-3 ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __a : Union[str, Any] = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __a : Dict = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_A ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __a : List[str] = image_processor( _A , return_tensors="""pt""" , max_patches=_A ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __a : Optional[int] = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 __a : Any = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(_A ): __a : Optional[int] = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_A ).flattened_patches __a : Tuple = """Hello""" __a : Optional[Any] = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_A , header_text=_A ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __a : Union[str, Any] = image_processor( _A , return_tensors="""pt""" , max_patches=_A , header_text=_A ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : Optional[Any] = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) __a : Dict = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __a : Any = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_A ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __a : List[str] = image_processor( _A , return_tensors="""pt""" , max_patches=_A ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __a : List[str] = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __a : List[str] = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_A ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __a : List[Any] = image_processor( _A , return_tensors="""pt""" , max_patches=_A ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." 
, ) @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( _a , unittest.TestCase ): lowercase__ = PixaStructImageProcessor if is_vision_available() else None def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = PixaStructImageProcessingTester(self , num_channels=4 ) __a : Union[str, Any] = 3 @property def __lowerCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , """do_normalize""" ) ) self.assertTrue(hasattr(_A , """do_convert_rgb""" ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __a : Optional[int] = ( (self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input __a : str = image_processor( image_inputs[0] , return_tensors="""pt""" , max_patches=_A ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched __a : Union[str, Any] = image_processor( _A , return_tensors="""pt""" , max_patches=_A ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
714
'''simple docstring'''
import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name='avgpooling', verbose=True)

    # Loading the image
    image = Image.open('path_to_image')

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
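# Tiny worked example for the pooling helpers above.
import numpy as np

mat = np.array([[1, 2, 3, 4],
                [5, 6, 7, 8],
                [9, 10, 11, 12],
                [13, 14, 15, 16]])
print(maxpooling(mat, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(mat, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]  (averages are truncated to int)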
697
0
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
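# Hypothetical launch for the script above (the file name is an assumption);
# torchrun sets the RANK and WORLD_SIZE environment variables the script reads:
#
#   torchrun --nproc_per_node=2 distributed.py --streaming True --num_workers 2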
715
'''simple docstring'''
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
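# Since no gate is applied before measurement, the qubit stays in |0> and every
# shot measures 0; the result is deterministic:
counts = single_qubit_measure(1, 1)
assert counts == {"0": 1000}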
697
0
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Any = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ): lowercase__ = AlbertTokenizer lowercase__ = AlbertTokenizerFast lowercase__ = True lowercase__ = True lowercase__ = True def __lowerCamelCase ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __a : Optional[int] = AlbertTokenizer(__UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' __a : List[Any] = """this is a test""" __a : Any = """this is a test""" return input_text, output_text def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = """<pad>""" __a : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(__UpperCamelCase ) , 3_0000 ) def __lowerCamelCase ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 ) def __lowerCamelCase ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return __a : str = self.get_tokenizer() __a : Any = self.get_rust_tokenizer() __a : List[str] = """I was born in 92000, and this is falsé.""" __a : Optional[int] = tokenizer.tokenize(__UpperCamelCase ) __a : Optional[Any] = rust_tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) __a : List[Any] = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) __a : Optional[Any] = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) __a : Optional[int] = self.get_rust_tokenizer() __a : Optional[Any] = tokenizer.encode(__UpperCamelCase ) __a : int = rust_tokenizer.encode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = AlbertTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase ) __a : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__UpperCamelCase , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [48, 25, 21, 1289] ) __a : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __UpperCamelCase , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) __a : Tuple = tokenizer.convert_tokens_to_ids(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) __a : Any = tokenizer.convert_ids_to_tokens(__UpperCamelCase ) self.assertListEqual( __UpperCamelCase , ["""▁i""", """▁was""", """▁born""", 
"""▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : str = AlbertTokenizer(__UpperCamelCase ) __a : Dict = tokenizer.encode("""sequence builders""" ) __a : Dict = tokenizer.encode("""multi-sequence build""" ) __a : int = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ) __a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCamelCase , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
716
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Any = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k', 'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v', 'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q', 'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u', 'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v', 'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out', 'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos', 'self_attn.rotary_emb': 'encoder.embed_positions', 'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm', 'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1', 'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2', 'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv', 'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm', 'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm', 'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense', 'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense', 'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm', 'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense', 'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense', 'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } __SCREAMING_SNAKE_CASE : Optional[Any] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: for attribute in key.split(""".""" ): __a : str = getattr(lowercase , lowercase ) if weight_type is not None: __a : Dict = getattr(lowercase , lowercase ).shape else: __a : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __a : Any = value elif weight_type == "weight_g": __a : int = value elif weight_type == "weight_v": __a : int = value elif weight_type == "bias": __a : List[Any] = value elif weight_type == "running_mean": __a : Union[str, Any] = value elif weight_type == "running_var": __a : Tuple = value elif weight_type == "num_batches_tracked": __a : Optional[int] = value elif weight_type == "inv_freq": __a : List[str] = value else: __a : List[str] = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _snake_case ( lowercase , lowercase , lowercase ) -> Dict: __a : Dict = [] __a : Dict = fairseq_model.state_dict() __a : Tuple = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): __a : int = False if "conv_layers" in name: load_conv_layer( lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , ) __a : List[Any] = True else: for key, mapped_key in MAPPING.items(): __a : Optional[int] = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __a : str = True if "*" in mapped_key: __a : Optional[int] = name.split(lowercase )[0].split(""".""" )[-2] __a : List[Any] = mapped_key.replace("""*""" , lowercase ) if "pos_bias_u" in name: __a : Union[str, Any] = None elif "pos_bias_v" in name: __a : List[Any] = None elif "weight_g" in name: __a : List[Any] = """weight_g""" elif "weight_v" in name: __a : List[Any] = """weight_v""" elif "bias" in name: __a : Optional[int] = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj __a : str = """weight""" elif "running_mean" in name: __a : List[str] = """running_mean""" elif "inv_freq" in name: __a : Dict = """inv_freq""" elif "running_var" in name: __a : Union[str, Any] = """running_var""" elif "num_batches_tracked" in name: __a : int = """num_batches_tracked""" else: __a : Optional[int] = None set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase ) continue if not is_used: unused_weights.append(lowercase ) logger.warning(F"""Unused weights: {unused_weights}""" ) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]: __a : Optional[Any] = full_name.split("""conv_layers.""" )[-1] __a : Union[str, Any] = name.split(""".""" ) __a : Optional[Any] = int(items[0] ) __a : int = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __a : Dict = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __a : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) __a : Dict = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) __a : Union[str, Any] = value logger.info(F"""Feat 
extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowercase ) @torch.no_grad() def _snake_case ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> Optional[Any]: if config_path is not None: __a : Any = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act="""swish""" ) else: __a : Optional[int] = WavaVecaConformerConfig() if "rope" in checkpoint_path: __a : Optional[Any] = """rotary""" if is_finetuned: if dict_path: __a : List[Any] = Dictionary.load(lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __a : int = target_dict.pad_index __a : List[str] = target_dict.bos_index __a : str = target_dict.eos_index __a : Dict = len(target_dict.symbols ) __a : Any = os.path.join(lowercase , """vocab.json""" ) if not os.path.isdir(lowercase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) ) return os.makedirs(lowercase , exist_ok=lowercase ) __a : Dict = target_dict.indices # fairseq has the <pad> and <s> switched __a : Optional[Any] = 0 __a : List[Any] = 1 with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowercase , lowercase ) __a : int = WavaVecaCTCTokenizer( lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , ) __a : Optional[int] = True if config.feat_extract_norm == """layer""" else False __a : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , ) __a : str = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase ) processor.save_pretrained(lowercase ) __a : List[str] = WavaVecaConformerForCTC(lowercase ) else: __a : Optional[int] = WavaVecaConformerForPreTraining(lowercase ) if is_finetuned: __a , __a , __a : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: __a : Optional[int] = argparse.Namespace(task="""audio_pretraining""" ) __a : Tuple = fairseq.tasks.setup_task(lowercase ) __a , __a , __a : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase ) __a : Any = model[0].eval() recursively_load_weights(lowercase , lowercase , not is_finetuned ) hf_wavavec.save_pretrained(lowercase ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) __SCREAMING_SNAKE_CASE : int = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
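# For reference, the conversion above is driven by a simple key-renaming pass:
# each fairseq parameter name is matched against a substring table and copied
# into the corresponding HF module. A minimal sketch of that pattern follows;
# `mapping` here is a hypothetical stand-in for the real MAPPING table defined
# earlier in this script, not the actual mapping.
def rename_fairseq_keys(fairseq_state_dict, mapping):
    hf_state_dict, unused = {}, []
    for name, value in fairseq_state_dict.items():
        for key, mapped_key in mapping.items():
            if key in name:
                hf_state_dict[mapped_key] = value
                break
        else:
            unused.append(name)  # mirrors the `unused_weights` warning above
    return hf_state_dict, unused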
"""Deprecation shim: PoolFormerFeatureExtractor is an alias of PoolFormerImageProcessor."""
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""Decorator that flags a callable as experimental and warns on every call."""
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
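# Quick usage sketch for the decorator above: calling a wrapped function emits
# a UserWarning, while functools.wraps keeps the original function's metadata.
@experimental
def new_api(x: int) -> int:
    return x * 2


new_api(3)  # returns 6 and warns: "'new_api' is experimental and might be subject to breaking changes in the future."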
"""Value-guided planning rollout on D4RL's hopper-medium-v2 environment."""
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


# sampling hyperparameters (kept from the original script; not all are consumed below)
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}


if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ....feature_extraction_sequence_utils import SequenceFeatureExtractor from ....feature_extraction_utils import BatchFeature from ....file_utils import PaddingStrategy, TensorType from ....utils import logging __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ): lowercase__ = ["input_features", "attention_mask"] def __init__( self , __UpperCamelCase=80 , __UpperCamelCase=1_6000 , __UpperCamelCase=0.0 , __UpperCamelCase=10 , __UpperCamelCase=25 , __UpperCamelCase="hamming_window" , __UpperCamelCase=3_2_7_6_8.0 , __UpperCamelCase=0.9_7 , __UpperCamelCase=1.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , **__UpperCamelCase , ): '''simple docstring''' super().__init__(feature_size=__UpperCamelCase , sampling_rate=__UpperCamelCase , padding_value=__UpperCamelCase , **__UpperCamelCase ) __a : List[str] = feature_size __a : List[str] = sampling_rate __a : int = padding_value __a : Any = hop_length __a : int = win_length __a : Tuple = frame_signal_scale __a : Union[str, Any] = preemphasis_coeff __a : List[str] = mel_floor __a : Union[str, Any] = normalize_means __a : Optional[Any] = normalize_vars __a : Optional[Any] = win_function __a : Union[str, Any] = return_attention_mask __a : List[Any] = win_length * sampling_rate // 1000 __a : List[Any] = hop_length * sampling_rate // 1000 __a : Optional[Any] = optimal_fft_length(self.sample_size ) __a : Any = (self.n_fft // 2) + 1 def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' if self.win_function == "hamming_window": __a : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=__UpperCamelCase ) else: __a : Dict = window_function(window_length=self.sample_size , name=self.win_function ) __a : Optional[Any] = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , ) __a : Any = spectrogram( one_waveform * self.frame_signal_scale , window=__UpperCamelCase , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=__UpperCamelCase , preemphasis=self.preemphasis_coeff , mel_filters=__UpperCamelCase , mel_floor=self.mel_floor , log_mel="""log""" , ) return msfc_features.T def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if self.normalize_means: __a : int = x[:input_length].mean(axis=0 ) __a : str = np.subtract(__UpperCamelCase , __UpperCamelCase ) if self.normalize_vars: __a : Dict = x[:input_length].std(axis=0 ) __a : Dict = np.divide(__UpperCamelCase , __UpperCamelCase ) if input_length < x.shape[0]: __a : Union[str, Any] = padding_value # make sure array is in float32 __a : Any = x.astype(np.floataa ) return x def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ): '''simple docstring''' __a : Tuple = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [self._normalize_one(__UpperCamelCase , __UpperCamelCase , self.padding_value ) for x, n in zip(__UpperCamelCase , __UpperCamelCase )] def __call__( self , __UpperCamelCase , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , 
__UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) __a : Tuple = isinstance(__UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) __a : Tuple = is_batched_numpy or ( isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __a : Tuple = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__UpperCamelCase , np.ndarray ): __a : List[str] = np.asarray(__UpperCamelCase , dtype=np.floataa ) elif isinstance(__UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __a : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __a : Any = [raw_speech] # extract fbank features __a : str = [self._extract_mfsc_features(__UpperCamelCase ) for one_waveform in raw_speech] # convert into correct format for padding __a : Optional[Any] = BatchFeature({"""input_features""": features} ) __a : Any = self.pad( __UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , ) # make sure list is in array format __a : int = padded_inputs.get("""input_features""" ) if isinstance(input_features[0] , __UpperCamelCase ): __a : Union[str, Any] = [np.asarray(__UpperCamelCase , dtype=np.floataa ) for feature in input_features] __a : List[str] = padded_inputs.get("""attention_mask""" ) if attention_mask is not None: __a : Optional[int] = [np.asarray(__UpperCamelCase , dtype=np.intaa ) for array in attention_mask] if self.normalize_means or self.normalize_vars: __a : Optional[Any] = ( np.array(__UpperCamelCase , dtype=np.intaa ) if self._get_padding_strategies(__UpperCamelCase , max_length=__UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD and padding else None ) __a : int = self.normalize( padded_inputs["""input_features"""] , attention_mask=__UpperCamelCase ) if return_tensors is not None: __a : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
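# Hedged usage sketch for the speech feature extractor defined above. The class
# and parameter names are obfuscated in this file, so `FeatureExtractor` below
# is a hypothetical stand-in; the keyword names mirror those referenced in the
# implementation (`sampling_rate`, `padding`, `return_tensors`).
import numpy as np

extractor = FeatureExtractor(feature_size=80, sampling_rate=16000)  # hypothetical name
waveform = np.random.randn(16000).astype(np.float32)  # one second of mono audio
batch = extractor(waveform, sampling_rate=16000, padding=True, return_tensors="np")
print(batch["input_features"].shape)  # (1, num_frames, 80)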
"""Test for the Prim's minimum spanning tree implementation."""
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    # build an undirected adjacency list: every edge is recorded in both directions
    adjancency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost])
        adjancency[nodeb].append([nodea, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
"""Archimedes' principle: buoyant force = fluid density * gravity * displaced volume."""
g = 9.80_665  # standard gravity, m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
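# Worked example: a 0.5 m^3 object fully submerged in water
# (density assumed to be 997 kg/m^3) experiences roughly 4.9 kN of buoyant force.
print(archimedes_principle(fluid_density=997, volume=0.5))  # ~4888.6 N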
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        # merge whatever sub-processors produced into one flat dict of model inputs
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
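# Hedged usage sketch: the processor simply merges the outputs of its two
# sub-processors into one dict. `image_processor`, `feature_extractor`,
# `video_frames` and `audio_array` are assumed to exist and are not defined here.
processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
inputs = processor(images=video_frames, audio=audio_array, sampling_rate=44100)
print(sorted(inputs))  # keys contributed by both sub-processors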
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=3 , __UpperCamelCase=30 , __UpperCamelCase=400 , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=1 / 255 , __UpperCamelCase=True , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=[0.5, 0.5, 0.5] , __UpperCamelCase=True , ): '''simple docstring''' __a : List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} __a : Dict = parent __a : Union[str, Any] = batch_size __a : Optional[int] = num_channels __a : Dict = min_resolution __a : List[Any] = max_resolution __a : int = do_resize __a : str = size __a : Optional[Any] = do_rescale __a : Optional[Any] = rescale_factor __a : str = do_normalize __a : Any = image_mean __a : Optional[Any] = image_std __a : Dict = do_pad def __lowerCamelCase ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False ): '''simple docstring''' if not batched: __a : Union[str, Any] = image_inputs[0] if isinstance(__UpperCamelCase , Image.Image ): __a , __a : Tuple = image.size else: __a , __a : Tuple = image.shape[1], image.shape[2] if w < h: __a : Optional[int] = int(self.size["""shortest_edge"""] * h / w ) __a : Tuple = self.size["""shortest_edge"""] elif w > h: __a : Optional[Any] = self.size["""shortest_edge"""] __a : Any = int(self.size["""shortest_edge"""] * w / h ) else: __a : Any = self.size["""shortest_edge"""] __a : Optional[int] = self.size["""shortest_edge"""] else: __a : Any = [] for image in image_inputs: __a , __a : Any = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __a : List[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[0] )[0] __a : Optional[Any] = max(__UpperCamelCase , key=lambda __UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , unittest.TestCase ): lowercase__ = DetrImageProcessor if is_vision_available() else None def __lowerCamelCase ( self ): '''simple docstring''' __a : str = DetrImageProcessingTester(self ) @property def __lowerCamelCase ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_rescale""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """rescale_factor""" ) ) self.assertTrue(hasattr(__UpperCamelCase , 
"""do_resize""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """size""" ) ) self.assertTrue(hasattr(__UpperCamelCase , """do_pad""" ) ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , __UpperCamelCase ) __a : List[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__UpperCamelCase ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , __UpperCamelCase ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' __a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , Image.Image ) # Test not batched input __a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) __a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , np.ndarray ) # Test not batched input __a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values __a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): '''simple docstring''' __a : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase , torch.Tensor ) # Test not batched input __a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase ) self.assertEqual( 
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values __a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: __a : Dict = json.loads(f.read() ) __a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target} # encode them __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" ) __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __a : Union[str, Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase ) __a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify area __a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) ) # verify boxes __a : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase ) __a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) ) # verify image_id __a : Union[str, Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) ) # verify is_crowd __a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) ) # verify class_labels __a : Any = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) ) # verify orig_size __a : Any = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) ) # verify size __a : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) ) @slow def __lowerCamelCase ( self ): '''simple docstring''' __a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: __a : Tuple = json.loads(f.read() ) __a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target} __a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them __a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" ) __a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" ) # verify pixel values __a : List[str] = torch.Size([1, 3, 800, 1066] 
) self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase ) __a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) ) # verify area __a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) ) # verify boxes __a : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase ) __a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) ) # verify image_id __a : List[str] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) ) # verify is_crowd __a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) ) # verify class_labels __a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) ) # verify masks __a : Union[str, Any] = 82_2873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase ) # verify orig_size __a : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) ) # verify size __a : List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
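# The `get_expected_values` helper above encodes the shortest-edge resize rule
# that these tests check against. A standalone sketch of that rule:
def shortest_edge_resize(h: int, w: int, shortest_edge: int = 18) -> tuple[int, int]:
    # Scale so the shorter side equals `shortest_edge`, preserving aspect ratio.
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge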
"""Harris corner detection on a grayscale image."""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k: Harris free parameter (typically 0.04 or 0.06);
        # window_size: side length of the summation window.
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # was hard-coded to 0.04 in the original; use the configured value
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Threshold can be tuned
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
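# The response computed in `detect` is the standard Harris corner measure:
#     R = det(M) - k * trace(M)**2
# where M is the structure tensor summed over the window W:
#     M = sum_{(x,y) in W} [[Ix^2, Ix*Iy], [Ix*Iy, Iy^2]]
# Large positive R means both eigenvalues of M are large (a corner);
# R < 0 indicates an edge, and |R| near 0 a flat region.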
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __SCREAMING_SNAKE_CASE : Optional[int] = trt.Logger(trt.Logger.WARNING) __SCREAMING_SNAKE_CASE : Tuple = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__) __SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' 
) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args() if args.tokenizer_name: __SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __SCREAMING_SNAKE_CASE : List[Any] = args.per_device_eval_batch_size __SCREAMING_SNAKE_CASE : int = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-fp32.engine' if args.fpaa: __SCREAMING_SNAKE_CASE : Dict = 'temp_engine/bert-fp16.engine' if args.inta: __SCREAMING_SNAKE_CASE : Tuple = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __SCREAMING_SNAKE_CASE : Optional[Any] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __SCREAMING_SNAKE_CASE : List[Any] = [network.get_input(i) for i in range(network.num_inputs)] __SCREAMING_SNAKE_CASE : List[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __SCREAMING_SNAKE_CASE : Tuple = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __SCREAMING_SNAKE_CASE : Dict = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __SCREAMING_SNAKE_CASE : Union[str, Any] = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def _snake_case ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[Any]: __a : Dict = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) __a : List[Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) __a : str = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowercase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowercase ) # start time __a : Optional[Any] = time.time() # Run inference context.execute_async( bindings=[int(lowercase ) for d_inp in d_inputs] + [int(lowercase ), int(lowercase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowercase , lowercase , lowercase ) cuda.memcpy_dtoh_async(lowercase , 
lowercase , lowercase ) # Synchronize the stream and take time stream.synchronize() # end time __a : str = time.time() __a : Any = end_time - start_time __a : Optional[int] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __SCREAMING_SNAKE_CASE : List[str] = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'].column_names __SCREAMING_SNAKE_CASE : Tuple = 'question' if 'question' in column_names else column_names[0] __SCREAMING_SNAKE_CASE : List[Any] = 'context' if 'context' in column_names else column_names[1] __SCREAMING_SNAKE_CASE : Tuple = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __SCREAMING_SNAKE_CASE : Tuple = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) __SCREAMING_SNAKE_CASE : Dict = min(args.max_seq_length, tokenizer.model_max_length) def _snake_case ( lowercase ) -> Tuple: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace __a : Optional[Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. 
This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. __a : Optional[int] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowercase , stride=args.doc_stride , return_overflowing_tokens=lowercase , return_offsets_mapping=lowercase , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. __a : Optional[Any] = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. __a : Optional[Any] = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). __a : Dict = tokenized_examples.sequence_ids(lowercase ) __a : Optional[Any] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. __a : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. __a : int = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples __SCREAMING_SNAKE_CASE : int = raw_datasets['validation'] # Validation Feature Creation __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __SCREAMING_SNAKE_CASE : List[Any] = default_data_collator __SCREAMING_SNAKE_CASE : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __SCREAMING_SNAKE_CASE : List[str] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def _snake_case ( lowercase , lowercase , lowercase , lowercase="eval" ) -> Any: # Post-processing: we match the start logits and end logits to answers in the original context. __a : List[str] = postprocess_qa_predictions( examples=lowercase , features=lowercase , predictions=lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowercase , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: __a : List[str] = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: __a : List[str] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] __a : Optional[Any] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowercase , label_ids=lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def _snake_case ( lowercase ) -> Optional[int]: return trt.volume(engine.get_binding_shape(lowercase ) ) * engine.get_binding_dtype(lowercase ).itemsize # Allocate device memory for inputs and outputs. __SCREAMING_SNAKE_CASE : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __SCREAMING_SNAKE_CASE : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __SCREAMING_SNAKE_CASE : str = cuda.mem_alloc(h_outputa.nbytes) __SCREAMING_SNAKE_CASE : Tuple = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __SCREAMING_SNAKE_CASE : Tuple = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') __SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0 __SCREAMING_SNAKE_CASE : str = 0 __SCREAMING_SNAKE_CASE : str = timeit.default_timer() __SCREAMING_SNAKE_CASE : Dict = None for step, batch in enumerate(eval_dataloader): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = outputs __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(start_logits) __SCREAMING_SNAKE_CASE : Tuple = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __SCREAMING_SNAKE_CASE : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __SCREAMING_SNAKE_CASE : List[str] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __SCREAMING_SNAKE_CASE : List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __SCREAMING_SNAKE_CASE : Tuple = nested_truncate(all_preds, len(eval_dataset)) __SCREAMING_SNAKE_CASE : str = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} 
ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) __SCREAMING_SNAKE_CASE : Optional[int] = post_processing_function(eval_examples, eval_dataset, all_preds) __SCREAMING_SNAKE_CASE : List[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
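# The inference helper above follows the canonical TensorRT + PyCUDA pattern:
# copy host inputs to device, launch the engine asynchronously on a stream,
# copy outputs back, then synchronize. A minimal single-input sketch of the
# same flow (names are illustrative, not from this script):
import pycuda.driver as cuda


def run_once(context, d_input, h_input, d_output, h_output, stream):
    cuda.memcpy_htod_async(d_input, h_input, stream)  # host -> device
    context.execute_async(bindings=[int(d_input), int(d_output)], stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(h_output, d_output, stream)  # device -> host
    stream.synchronize()  # wait for completion
    return h_output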
'''simple docstring''' from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase = None ): '''simple docstring''' if components is None: __a : Union[str, Any] = [] __a : List[str] = list(__UpperCamelCase ) def __len__( self ): '''simple docstring''' return len(self.__components ) def __str__( self ): '''simple docstring''' return "(" + ",".join(map(__UpperCamelCase , self.__components ) ) + ")" def __add__( self , __UpperCamelCase ): '''simple docstring''' __a : Any = len(self ) if size == len(__UpperCamelCase ): __a : List[str] = [self.__components[i] + other.component(__UpperCamelCase ) for i in range(__UpperCamelCase )] return Vector(__UpperCamelCase ) else: raise Exception("""must have the same size""" ) def __sub__( self , __UpperCamelCase ): '''simple docstring''' __a : Any = len(self ) if size == len(__UpperCamelCase ): __a : str = [self.__components[i] - other.component(__UpperCamelCase ) for i in range(__UpperCamelCase )] return Vector(__UpperCamelCase ) else: # error case raise Exception("""must have the same size""" ) @overload def __mul__( self , __UpperCamelCase ): '''simple docstring''' ... @overload def __mul__( self , __UpperCamelCase ): '''simple docstring''' ... def __mul__( self , __UpperCamelCase ): '''simple docstring''' if isinstance(__UpperCamelCase , (float, int) ): __a : Optional[Any] = [c * other for c in self.__components] return Vector(__UpperCamelCase ) elif isinstance(__UpperCamelCase , __UpperCamelCase ) and len(self ) == len(__UpperCamelCase ): __a : Dict = len(self ) __a : Optional[int] = [self.__components[i] * other.component(__UpperCamelCase ) for i in range(__UpperCamelCase )] return sum(__UpperCamelCase ) else: # error case raise Exception("""invalid operand!""" ) def __lowerCamelCase ( self ): '''simple docstring''' return Vector(self.__components ) def __lowerCamelCase ( self , __UpperCamelCase ): '''simple docstring''' if isinstance(__UpperCamelCase , __UpperCamelCase ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception("""index out of range""" ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' assert -len(self.__components ) <= pos < len(self.__components ) __a : Optional[Any] = value def __lowerCamelCase ( self ): '''simple docstring''' if len(self.__components ) == 0: raise Exception("""Vector is empty""" ) __a : int = [c**2 for c in self.__components] return math.sqrt(sum(__UpperCamelCase ) ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = False ): '''simple docstring''' __a : List[str] = self * other __a : List[Any] = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def _snake_case ( lowercase ) -> Vector: assert isinstance(lowercase , lowercase ) return Vector([0] * dimension ) def _snake_case ( lowercase , lowercase ) -> Vector: assert isinstance(lowercase , lowercase ) and (isinstance(lowercase , lowercase )) __a : List[str] = [0] * dimension __a : int = 1 return Vector(lowercase ) def _snake_case ( lowercase , lowercase , lowercase ) -> Vector: assert ( isinstance(lowercase , lowercase ) and isinstance(lowercase , lowercase ) and (isinstance(lowercase , (int, float) )) ) return x * scalar + y def _snake_case ( lowercase , lowercase , lowercase ) -> Vector: random.seed(lowercase ) __a : Union[str, 
Any] = [random.randint(lowercase , lowercase ) for _ in range(lowercase )] return Vector(lowercase ) class SCREAMING_SNAKE_CASE__ : def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' __a : Dict = matrix __a : Optional[int] = w __a : Dict = h def __str__( self ): '''simple docstring''' __a : Dict = """""" for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , __UpperCamelCase ): '''simple docstring''' if self.__width == other.width() and self.__height == other.height(): __a : Any = [] for i in range(self.__height ): __a : int = [ self.__matrix[i][j] + other.component(__UpperCamelCase , __UpperCamelCase ) for j in range(self.__width ) ] matrix.append(__UpperCamelCase ) return Matrix(__UpperCamelCase , self.__width , self.__height ) else: raise Exception("""matrix must have the same dimension!""" ) def __sub__( self , __UpperCamelCase ): '''simple docstring''' if self.__width == other.width() and self.__height == other.height(): __a : Any = [] for i in range(self.__height ): __a : Union[str, Any] = [ self.__matrix[i][j] - other.component(__UpperCamelCase , __UpperCamelCase ) for j in range(self.__width ) ] matrix.append(__UpperCamelCase ) return Matrix(__UpperCamelCase , self.__width , self.__height ) else: raise Exception("""matrices must have the same dimension!""" ) @overload def __mul__( self , __UpperCamelCase ): '''simple docstring''' ... @overload def __mul__( self , __UpperCamelCase ): '''simple docstring''' ... def __mul__( self , __UpperCamelCase ): '''simple docstring''' if isinstance(__UpperCamelCase , __UpperCamelCase ): # matrix-vector if len(__UpperCamelCase ) == self.__width: __a : List[str] = zero_vector(self.__height ) for i in range(self.__height ): __a : Dict = [ self.__matrix[i][j] * other.component(__UpperCamelCase ) for j in range(self.__width ) ] ans.change_component(__UpperCamelCase , sum(__UpperCamelCase ) ) return ans else: raise Exception( """vector must have the same size as the """ """number of columns of the matrix!""" ) elif isinstance(__UpperCamelCase , (int, float) ): # matrix-scalar __a : Optional[Any] = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(__UpperCamelCase , self.__width , self.__height ) return None def __lowerCamelCase ( self ): '''simple docstring''' return self.__height def __lowerCamelCase ( self ): '''simple docstring''' return self.__width def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception("""change_component: indices out of bounds""" ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if 0 <= x < self.__height and 0 <= y < self.__width: __a : Any = value else: raise Exception("""change_component: indices out of bounds""" ) def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if self.__height != self.__width: raise Exception("""Matrix is not square""" ) __a : Optional[int] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__UpperCamelCase ) ): __a : List[str] = minor[i][:y] + minor[i][y + 1 :] return Matrix(__UpperCamelCase , self.__width - 1 , self.__height - 1 ).determinant() def __lowerCamelCase ( self , 
__UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if self.__height != self.__width: raise Exception("""Matrix is not square""" ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__UpperCamelCase , __UpperCamelCase ) else: raise Exception("""Indices out of bounds""" ) def __lowerCamelCase ( self ): '''simple docstring''' if self.__height != self.__width: raise Exception("""Matrix is not square""" ) if self.__height < 1: raise Exception("""Matrix has no element""" ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __a : Union[str, Any] = [ self.__matrix[0][y] * self.cofactor(0 , __UpperCamelCase ) for y in range(self.__width ) ] return sum(__UpperCamelCase ) def _snake_case ( lowercase ) -> Matrix: __a : list[list[float]] = [[0] * n for _ in range(lowercase )] return Matrix(lowercase , lowercase , lowercase ) def _snake_case ( lowercase , lowercase , lowercase , lowercase ) -> Matrix: random.seed(lowercase ) __a : list[list[float]] = [ [random.randint(lowercase , lowercase ) for _ in range(lowercase )] for _ in range(lowercase ) ] return Matrix(lowercase , lowercase , lowercase )
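# Hedged usage sketch. As written above, both classes are named
# SCREAMING_SNAKE_CASE__ (and all module-level functions `_snake_case`), so the
# second definition shadows the first; `Vector` and `Matrix` below are the
# intended, hypothetical names.
v = Vector([1.0, 2.0, 3.0])
w = Vector([3.0, 2.0, 1.0])
print(v + w)            # (4.0,4.0,4.0)
print(v * w)            # dot product -> 10.0
m = Matrix([[1.0, 0.0], [0.0, 1.0]], 2, 2)
print(m.determinant())  # identity matrix -> 1.0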
"""Karras et al. (2022) stochastic sampling pipeline for unconditional image generation."""
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
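# Hypothetical usage (the model id is a placeholder, not a real checkpoint):
#   pipe = KarrasVePipeline.from_pretrained("some-org/karras-ve-unet")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("sample.png")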
697
0
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE__(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    # Parameter names below are reconstructed from the attribute reads in the
    # method bodies; the source record flattened them all to `__UpperCamelCase`.
    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        '''simple docstring'''
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
701
'''simple docstring'''


def check_bouncy(num: int) -> bool:
    if not isinstance(num, int):
        raise ValueError("""check_bouncy() accepts only integer arguments""")
    str_n = str(num)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError("""solution() only accepts values from 0 to 100""")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f'''{solution(99)}''')
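A quick sanity check of the predicate, plus the 50% threshold from Project Euler 112:

assert not check_bouncy(134468)  # digits never decrease -> not bouncy
assert not check_bouncy(66420)   # digits never increase -> not bouncy
assert check_bouncy(155349)      # neither monotone -> bouncy
print(solution(50))  # 538, the known Project Euler 112 answer for 50%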
697
0
'''simple docstring'''
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


warnings.warn(
    'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
    ' StableDiffusionInpaintPipeline` instead.'
)
702
'''simple docstring'''
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
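A direct-call sketch of the converter; the paths are hypothetical placeholders, not verified checkpoints.

# Equivalent to:
#   python convert_gpt2_checkpoint.py --gpt2_checkpoint_path ./gpt2_tf/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2_pt
convert_gpt2_checkpoint_to_pytorch(
    gpt2_checkpoint_path="./gpt2_tf/model.ckpt",  # hypothetical TF checkpoint
    gpt2_config_file="",                          # "" falls back to the default GPT2Config()
    pytorch_dump_folder_path="./gpt2_pt",         # hypothetical output directory
)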
697
0