| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 81-54k) | int64 (0-721) | string (length 91-41.9k) | int64 (0-699) | int64 (0-1) |
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return a longest non-decreasing subsequence of ``array``."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
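A quick hand-traced sanity check for the function above (the example values are mine, not from the original file):

# Illustrative calls, traced by hand:
print(longest_subsequence([1, 2, 3]))  # -> [1, 2, 3] (already non-decreasing)
print(longest_subsequence([3, 1, 2]))  # -> [1, 2] (dropping the initial 3 gives a longer run)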
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
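A minimal usage sketch for the config above (assumes the transformers package is installed; the printed value follows directly from the stage_names construction in __init__):

# Hypothetical usage sketch, not part of the original file:
# from transformers import ResNetConfig
# config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2])
# print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']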
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        # halfway through, round-trip the scheduler state through disk
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    # Wraps a schedule's lr_lambdas so the scheduler state stays picklable.
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
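A short sketch of how the unwrap helper above is typically exercised (assumes torch and transformers are installed; the expected values come from the linear-warmup row in the table above):

# Hypothetical usage, not part of the original test file:
# optimizer = AdamW(nn.Linear(2, 2).parameters(), lr=10.0)
# scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
# print(unwrap_schedule(scheduler, num_steps=10))
# -> approximately [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]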
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 color clusters plus one start-of-sequence token
        n_positions=32 * 32,  # one position per pixel of a 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
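A minimal instantiation sketch (assumes transformers; the printed numbers follow from the constructor defaults above, 512 + 1 and 32 * 32):

# Hypothetical usage, not part of the original file:
# from transformers import ImageGPTConfig
# config = ImageGPTConfig()
# print(config.vocab_size, config.n_positions)  # 513 1024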
from typing import List, Optional, Union

import torch
from transformers import (
    XLMRobertaTokenizer,
)

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from .text_encoder import MultilingualCLIP


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n        >>> import torch\n\n        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n        >>> pipe_prior.to("cuda")\n\n        >>> prompt = "red cat, 4k photo"\n        >>> out = pipe_prior(prompt)\n        >>> image_emb = out.image_embeds\n        >>> negative_image_emb = out.negative_image_embeds\n\n        >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n        >>> pipe.to("cuda")\n\n        >>> image = pipe(\n        ...     prompt,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=negative_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ... ).images\n\n        >>> image[0].save("cat.png")\n        ```\n'


def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor


class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
    ):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
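For reference, the latent-size arithmetic in get_new_h_w above, traced by hand for the 768x768 case from the example docstring (movq_scale_factor is 8 when the movq has four block_out_channels):

# get_new_h_w(768, 768, 8): 768 // 8**2 = 12 with no remainder, so new_h = new_w = 12
# and the function returns (96, 96): the pipeline denoises 96x96 latents for a 768x768 image.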
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
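The key preprocessing trick above is the label masking: pad positions are replaced with -100 so the cross-entropy loss ignores them. A standalone illustration (the token values are mine):

# Illustrative label masking, assuming pad_token_id == 0:
labels = [5, 7, 0, 0]
masked = [-100 if t == 0 else t for t in labels]  # -> [5, 7, -100, -100]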
from __future__ import annotations

import requests

valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }

    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
import inspect
import unittest
import warnings
from math import ceil, floor

from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # each of the four conv stages halves the spatial resolution
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Solve ax^2 + bx + c = 0, returning real roots as floats and complex roots as-is."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
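Two hand-checked cases for the solver above (my own examples, not from the original file):

# quadratic_roots(a=1, b=-2, c=1) -> (1.0, 1.0)   delta = 0, repeated real root
# quadratic_roots(a=1, b=0, c=1)  -> (1j, -1j)    delta = -4, complex conjugate pair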
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_mt5_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
from manim import *
class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))
            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
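# Render sketch (assuming the scene above is saved as stage_2.py; the file name
# is illustrative):
#   manim -pql stage_2.py Stage2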
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save the PyTorch model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__A : Dict = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
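# Example invocation (script name and paths are placeholders):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./models/gpt2/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch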
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
@slow
@require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
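# Minimal standalone usage of the pipeline exercised above (downloads the CLIP
# checkpoint; the image path is illustrative):
#   from transformers import pipeline
#   from PIL import Image
#   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   print(classifier(Image.open("cats.png"), candidate_labels=["cat", "plane", "remote"]))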
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__A : str = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
__A : Dict = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
__A : Dict = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1_055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
    doctest.testmod()
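# Worked examples (using the conversion table above): 1000 J is 1 kJ and
# 1 kWh is 3.6 MJ.
if __name__ == "__main__":
    print(energy_conversion("joule", "kilojoule", 1000))  # 1.0
    print(energy_conversion("kilowatthour", "megajoule", 1))  # 3.6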
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__A : str = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
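# Usage sketch: padding a ragged batch with a minimal concrete subclass. The
# ToyFeatureExtractor class and all values below are illustrative; real
# subclasses (e.g. speech feature extractors) define model_input_names the same way.
#   class ToyFeatureExtractor(SequenceFeatureExtractor):
#       model_input_names = ["input_values"]
#
#   fe = ToyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = fe.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]}, padding=True, return_tensors="np")
#   # batch["input_values"].shape == (2, 3); the second row is padded with 0.0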
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
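# Usage sketch (assumes the HashTable base class from .hash_table exposes an
# insert_data method and a HashTable(size_table, charge_factor=...) constructor;
# exact signatures may differ):
#   ht = HashTableWithLinkedList(3, charge_factor=2)
#   for value in (10, 20, 30):
#       ht.insert_data(value)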
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : int = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    # Custom torch.Tensor.unfold implementation to enable ONNX export
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    # Control-flow-free implementation so the computation can be exported to ONNX
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
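# Usage sketch: the default attention_types expand to 24 alternating layers,
# matching the default num_layers.
#   config = GPTNeoConfig()
#   assert config.attention_layers == ["global", "local"] * 12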
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : int=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Tuple=[128, 256, 384] , __lowerCamelCase : int=[4, 6, 8] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : List[str]=[16, 16, 16] , __lowerCamelCase : int=0 , __lowerCamelCase : List[Any]=[2, 2, 2] , __lowerCamelCase : List[str]=[2, 2, 2] , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : int=2 , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = kernel_size
SCREAMING_SNAKE_CASE = stride
SCREAMING_SNAKE_CASE = padding
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = key_dim
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = attention_ratio
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = initializer_range
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE = LevitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
SCREAMING_SNAKE_CASE = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _snake_case ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = LevitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
lowerCamelCase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = LevitModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _snake_case ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self : Any ):
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _snake_case ( self : Tuple ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def _snake_case ( self : Tuple ):
pass
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def check_hidden_states_output(__lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = len(self.model_tester.depths ) + 1
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : int ):
pass
def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int=False ):
SCREAMING_SNAKE_CASE = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _snake_case ( self : List[Any] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ):
SCREAMING_SNAKE_CASE = problem_type["title"]
SCREAMING_SNAKE_CASE = problem_type["num_labels"]
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
SCREAMING_SNAKE_CASE = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list:
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def _snake_case ( self : List[Any] ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = LevitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longt5'] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_longt5'] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
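# Behavior sketch: with the lazy module installed in sys.modules, importing a
# symbol such as LongT5Model from this package only loads the heavy torch-backed
# submodule on first attribute access (assuming torch is installed).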
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
# Load configuration defined in the metadata file
with open(A__ ) as metadata_file:
SCREAMING_SNAKE_CASE = json.load(A__ )
SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=A__ , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE = torch.load(A__ , map_location="cpu" )["module"]
# Load the entity vocab file
SCREAMING_SNAKE_CASE = load_original_entity_vocab(A__ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE = AddedToken("<ent>" , lstrip=A__ , rstrip=A__ )
SCREAMING_SNAKE_CASE = AddedToken("<ent2>" , lstrip=A__ , rstrip=A__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(A__ )
with open(os.path.join(A__ , "tokenizer_config.json" ) , "r" ) as f:
SCREAMING_SNAKE_CASE = json.load(A__ )
SCREAMING_SNAKE_CASE = "MLukeTokenizer"
with open(os.path.join(A__ , "tokenizer_config.json" ) , "w" ) as f:
json.dump(A__ , A__ )
with open(os.path.join(A__ , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(A__ , A__ )
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(A__ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(["@"] )[0]
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(["#"] )[0]
SCREAMING_SNAKE_CASE = state_dict["embeddings.word_embeddings.weight"]
SCREAMING_SNAKE_CASE = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE = state_dict[bias_name]
SCREAMING_SNAKE_CASE = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE = F"encoder.layer.{layer_index}.attention.self."
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE = state_dict["entity_embeddings.entity_embeddings.weight"]
SCREAMING_SNAKE_CASE = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE = state_dict["entity_predictions.bias"]
SCREAMING_SNAKE_CASE = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE = LukeForMaskedLM(config=A__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
SCREAMING_SNAKE_CASE = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
SCREAMING_SNAKE_CASE = state_dict[key]
else:
SCREAMING_SNAKE_CASE = state_dict[key]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.load_state_dict(A__ , strict=A__ )
if set(A__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"Unexpected unexpected_keys: {unexpected_keys}" )
if set(A__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(A__ , task="entity_classification" )
SCREAMING_SNAKE_CASE = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
SCREAMING_SNAKE_CASE = (0, 9)
SCREAMING_SNAKE_CASE = tokenizer(A__ , entity_spans=[span] , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model(**A__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 33, 768) )
        SCREAMING_SNAKE_CASE = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , A__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 1, 768) )
        SCREAMING_SNAKE_CASE = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
F" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , A__ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(A__ )
SCREAMING_SNAKE_CASE = "Tokyo is the capital of <mask>."
SCREAMING_SNAKE_CASE = (24, 30)
SCREAMING_SNAKE_CASE = tokenizer(A__ , entity_spans=[span] , return_tensors="pt" )
SCREAMING_SNAKE_CASE = model(**A__ )
SCREAMING_SNAKE_CASE = encoding["input_ids"][0].tolist()
SCREAMING_SNAKE_CASE = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
SCREAMING_SNAKE_CASE = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(A__ )
SCREAMING_SNAKE_CASE = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(A__ ) )
model.save_pretrained(A__ )
def __a ( A__ : List[str] ):
SCREAMING_SNAKE_CASE = ["[MASK]", "[PAD]", "[UNK]"]
SCREAMING_SNAKE_CASE = [json.loads(line ) for line in open(A__ )]
SCREAMING_SNAKE_CASE = {}
for entry in data:
SCREAMING_SNAKE_CASE = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE = entity_id
break
SCREAMING_SNAKE_CASE = F"{language}:{entity_name}"
SCREAMING_SNAKE_CASE = entity_id
return new_mapping
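# Hedged illustration of the expected input (a made-up entry, not taken from a
# real entity vocab file): each line of the file is a JSON object such as
#   {"id": 10, "entities": [["Japan", "en"], ["日本", "ja"]]}
# which the loop above flattens into {"en:Japan": 10, "ja:日本": 10}, with the
# special tokens ("[MASK]", "[PAD]", "[UNK]") mapped without a language prefix.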
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__A : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 698 |
import cmath
import math
def __a ( A__ : float , A__ : float , A__ : float , A__ : float ):
SCREAMING_SNAKE_CASE = math.radians(A__ )
SCREAMING_SNAKE_CASE = math.radians(A__ )
# Convert voltage and current to rectangular form
SCREAMING_SNAKE_CASE = cmath.rect(A__ , A__ )
SCREAMING_SNAKE_CASE = cmath.rect(A__ , A__ )
# Calculate apparent power
return voltage_rect * current_rect
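# Worked example (a sketch with hypothetical values, not from the source): a
# 100 V source at 0 degrees driving 5 A at 0 degrees yields a purely real
# apparent power of (500+0j) VA, while shifting the current to 90 degrees
# makes it purely reactive, approximately 500j VA.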
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = StableDiffusionSAGPipeline
lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = False
def _snake_case ( self : Optional[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(__lowerCamelCase )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any]=0 ):
if str(__lowerCamelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {
"prompt": ".",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 1.0,
"sag_scale": 1.0,
"output_type": "numpy",
}
return inputs
def _snake_case ( self : Union[str, Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
SCREAMING_SNAKE_CASE = sag_pipe.to(__lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "."
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = sag_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE = sag_pipe.to(__lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "."
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = sag_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
SCREAMING_SNAKE_CASE = sag_pipe.to(__lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "."
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = sag_pipe(
[prompt] , width=768 , height=512 , generator=__lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
SCREAMING_SNAKE_CASE = output.images
assert image.shape == (1, 512, 768, 3) | 698 |
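# Minimal usage sketch of the pipeline exercised above (hedged: assumes a CUDA
# device and hub access; the prompt is illustrative, not from these tests):
#   pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
#   image = pipe("a photo of an astronaut", sag_scale=0.75).images[0]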
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __a ( A__ : List[str] ):
SCREAMING_SNAKE_CASE = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def __a ( A__ : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape
SCREAMING_SNAKE_CASE = nn.Linear(A__ , A__ , bias=A__ )
SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def __a ( A__ : Tuple , A__ : Union[str, Any]=None ):
SCREAMING_SNAKE_CASE = {}
for old_key in state_dict.keys():
SCREAMING_SNAKE_CASE = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts.0" , F"ffn.experts.expert_{expert_idx}" )
else:
SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
SCREAMING_SNAKE_CASE = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
SCREAMING_SNAKE_CASE = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
SCREAMING_SNAKE_CASE = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
SCREAMING_SNAKE_CASE = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
SCREAMING_SNAKE_CASE = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
SCREAMING_SNAKE_CASE = key.replace("final_layer_norm" , "ff_layer_norm" )
SCREAMING_SNAKE_CASE = state_dict[old_key]
return new_dict
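# Example rename performed by the function above (the key is hypothetical, for
# illustration only): with expert_idx=7,
#   "decoder.layers.3.moe_layer.experts.0.fc1.weight"
#     -> "decoder.layers.3.ffn.experts.expert_7.fc1.weight"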
def __a ( A__ : List[str] , A__ : List[Any] , A__ : str , A__ : Union[str, Any] , A__ : str = WEIGHTS_NAME ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
os.makedirs(A__ , exist_ok=A__ )
for expert in range(A__ ):
SCREAMING_SNAKE_CASE = switch_checkpoint_path + F"-rank-{expert}.pt"
if os.path.isfile(A__ ):
SCREAMING_SNAKE_CASE = torch.load(A__ )["model"]
remove_ignore_keys_(A__ )
SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ )
SCREAMING_SNAKE_CASE = os.path.join(
A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) )
torch.save(A__ , A__ )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(A__ )[0]].dtype )
# Add the last block
SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) )
SCREAMING_SNAKE_CASE = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(A__ )
SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ )
SCREAMING_SNAKE_CASE = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(A__ ) == 1:
SCREAMING_SNAKE_CASE = os.path.join(A__ , A__ )
torch.save(A__ , A__ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(A__ , A__ )
# Otherwise, let's build the index
SCREAMING_SNAKE_CASE = {}
for idx, shard in enumerate(A__ ):
SCREAMING_SNAKE_CASE = weights_name.replace(".bin" , F"-{idx+1:05d}-of-{len(A__ ):05d}.bin" )
SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) )
os.rename(A__ , os.path.join(A__ , A__ ) )
for key in shard:
SCREAMING_SNAKE_CASE = shard_file
# Add the metadata
SCREAMING_SNAKE_CASE = {"total_size": total_size}
SCREAMING_SNAKE_CASE = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(A__ , A__ ) , "w" , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE = json.dumps(A__ , indent=2 , sort_keys=A__ ) + "\n"
f.write(A__ )
return metadata, index
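# The index written above follows the standard sharded-checkpoint layout,
# sketched here with hypothetical values:
#   {"metadata": {"total_size": 123456},
#    "weight_map": {"shared.weight": "pytorch_model-00001-of-00002.bin", ...}}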
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__A : Optional[int] = parser.parse_args()
__A , __A : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
__A : Any = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
__A : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path) | 698 | 1 |
import numpy as np
import qiskit
def __a ( A__ : int = 8 , A__ : int | None = None ):
SCREAMING_SNAKE_CASE = np.random.default_rng(seed=A__ )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
SCREAMING_SNAKE_CASE = 6 * key_len
# Measurement basis for Alice's qubits.
SCREAMING_SNAKE_CASE = rng.integers(2 , size=A__ )
# The set of states Alice will prepare.
SCREAMING_SNAKE_CASE = rng.integers(2 , size=A__ )
# Measurement basis for Bob's qubits.
SCREAMING_SNAKE_CASE = rng.integers(2 , size=A__ )
# Quantum Circuit to simulate BB84
SCREAMING_SNAKE_CASE = qiskit.QuantumCircuit(A__ , name="BB84" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(A__ ):
if alice_state[index] == 1:
bbaa_circ.x(A__ )
if alice_basis[index] == 1:
bbaa_circ.h(A__ )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(A__ ):
if bob_basis[index] == 1:
bbaa_circ.h(A__ )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
SCREAMING_SNAKE_CASE = qiskit.Aer.get_backend("aer_simulator" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
SCREAMING_SNAKE_CASE = qiskit.execute(A__ , A__ , shots=1 , seed_simulator=A__ )
# Returns the result of measurement.
SCREAMING_SNAKE_CASE = job.result().get_counts(A__ ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
SCREAMING_SNAKE_CASE = "".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
A__ , A__ , A__ )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
SCREAMING_SNAKE_CASE = gen_key[:key_len] if len(A__ ) >= key_len else gen_key.ljust(A__ , "0" )
return key
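# Illustrative note on the sifting step above: with 6 * key_len prepared
# qubits and a 50% chance that Alice's and Bob's bases agree, about
# 3 * key_len sifted bits survive on average, so truncation (rather than
# zero-padding) is the common case.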
if __name__ == "__main__":
print(f'The generated key is : {bbaa(8, seed=0)}')
from doctest import testmod
testmod() | 698 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = BlipImageProcessor()
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
SCREAMING_SNAKE_CASE = BlipProcessor(__lowerCamelCase , __lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self : Dict , **__lowerCamelCase : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer
def _snake_case ( self : List[Any] , **__lowerCamelCase : Optional[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor
def _snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = image_processor(__lowerCamelCase , return_tensors="np" )
SCREAMING_SNAKE_CASE = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) | 698 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "facebook/bart-large-mnli"
lowerCamelCase__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
lowerCamelCase__ = "text_classifier"
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSequenceClassification
lowerCamelCase__ = ["text", ["text"]]
lowerCamelCase__ = ["text"]
def _snake_case ( self : Optional[Any] ):
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
SCREAMING_SNAKE_CASE = int(__lowerCamelCase )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def _snake_case ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(__lowerCamelCase ) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )
def _snake_case ( self : str , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id] | 698 |
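# Usage sketch (hedged: the call below is illustrative and the predicted label
# is not a recorded output):
#   tool = _SCREAMING_SNAKE_CASE()
#   tool("This movie was great", ["positive", "negative"])  # -> e.g. "positive"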
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def _snake_case ( *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ):
pass
def __a ( A__ : str ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__A : Tuple = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model=__lowerCamelCase , tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
SCREAMING_SNAKE_CASE = "What is the placebo?"
SCREAMING_SNAKE_CASE = [
{
"image": load_image(__lowerCamelCase ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _snake_case ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = dqa_pipeline(__lowerCamelCase , top_k=2 )
self.assertEqual(
__lowerCamelCase , [
[
{"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )},
{"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "How many cats are there?"
SCREAMING_SNAKE_CASE = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
# No text is detected in this image, so LayoutLMv2 cannot find an answer
# and the pipeline should return an empty result.
SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
# We can optionally pass the words and bounding boxes directly
SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , words=__lowerCamelCase , boxes=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , max_seq_len=50 , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _snake_case ( self : List[Any] ):
pass | 698 | 1 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def __a ( ):
SCREAMING_SNAKE_CASE = 9
SCREAMING_SNAKE_CASE = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
SCREAMING_SNAKE_CASE = kruskal(A__ , A__ )
SCREAMING_SNAKE_CASE = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(A__ ) == sorted(A__ ) | 698 |
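# Sanity note: the expected MST spans all 9 vertices with 8 edges and total
# weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37 (the classic CLRS example graph).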
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __a ( A__ : str , A__ : List[str] ):
assert isinstance(A__ , A__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __a ( A__ : Tuple , A__ : List[Any] , A__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = tmp_path / "cache"
SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE = JsonDatasetReader(A__ , cache_dir=A__ , keep_in_memory=A__ ).read()
_check_json_dataset(A__ , A__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __a ( A__ : Optional[Any] , A__ : Optional[int] , A__ : int ):
SCREAMING_SNAKE_CASE = tmp_path / "cache"
SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE = JsonDatasetReader(A__ , features=A__ , cache_dir=A__ ).read()
_check_json_dataset(A__ , A__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def __a ( A__ : int , A__ : str , A__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = tmp_path / "cache"
SCREAMING_SNAKE_CASE = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE = JsonDatasetReader(A__ , features=A__ , cache_dir=A__ ).read()
assert isinstance(A__ , A__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __a ( A__ : str , A__ : str ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
SCREAMING_SNAKE_CASE = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
SCREAMING_SNAKE_CASE = features.copy()
SCREAMING_SNAKE_CASE = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE = tmp_path / "cache"
SCREAMING_SNAKE_CASE = JsonDatasetReader(A__ , features=A__ , cache_dir=A__ ).read()
assert isinstance(A__ , A__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __a ( A__ : Any , A__ : List[str] , A__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE = tmp_path / "cache"
SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE = JsonDatasetReader(A__ , cache_dir=A__ , split=A__ ).read()
_check_json_dataset(A__ , A__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def __a ( A__ : Tuple , A__ : Optional[Any] , A__ : List[Any] ):
if issubclass(A__ , A__ ):
SCREAMING_SNAKE_CASE = jsonl_path
elif issubclass(A__ , A__ ):
SCREAMING_SNAKE_CASE = [jsonl_path]
SCREAMING_SNAKE_CASE = tmp_path / "cache"
SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE = JsonDatasetReader(A__ , cache_dir=A__ ).read()
_check_json_dataset(A__ , A__ )
def __a ( A__ : str , A__ : Dict , A__ : Any=("train",) ):
assert isinstance(A__ , A__ )
for split in splits:
SCREAMING_SNAKE_CASE = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __a ( A__ : List[Any] , A__ : Dict , A__ : Dict ):
SCREAMING_SNAKE_CASE = tmp_path / "cache"
SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE = JsonDatasetReader({"train": jsonl_path} , cache_dir=A__ , keep_in_memory=A__ ).read()
_check_json_datasetdict(A__ , A__ )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def __a ( A__ : int , A__ : Optional[Any] , A__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = tmp_path / "cache"
SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE = JsonDatasetReader({"train": jsonl_path} , features=A__ , cache_dir=A__ ).read()
_check_json_datasetdict(A__ , A__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def __a ( A__ : Dict , A__ : Optional[Any] , A__ : Dict ):
if split:
SCREAMING_SNAKE_CASE = {split: jsonl_path}
else:
SCREAMING_SNAKE_CASE = "train"
SCREAMING_SNAKE_CASE = {"train": jsonl_path, "test": jsonl_path}
SCREAMING_SNAKE_CASE = tmp_path / "cache"
SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
SCREAMING_SNAKE_CASE = JsonDatasetReader(A__ , cache_dir=A__ ).read()
_check_json_datasetdict(A__ , A__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __a ( A__ : Dict ):
return json.load(A__ )
def __a ( A__ : Optional[int] ):
return [json.loads(line ) for line in buffer]
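# Illustration of the two helpers above (hypothetical buffer contents): for a
# JSON-lines buffer holding b'{"a": 1}\n{"a": 2}\n' the line-based loader
# returns [{"a": 1}, {"a": 2}], while the plain loader parses the buffer as a
# single JSON document.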
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def _snake_case ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE = load_json_function(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert isinstance(exported_content[0] , __lowerCamelCase )
assert len(__lowerCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : str ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase , orient=__lowerCamelCase ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE = load_json(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__lowerCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__lowerCamelCase ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def _snake_case ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase , num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE = load_json_function(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert isinstance(exported_content[0] , __lowerCamelCase )
assert len(__lowerCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def _snake_case ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , lines=__lowerCamelCase , orient=__lowerCamelCase , num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE = load_json(__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__lowerCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__lowerCamelCase ) == 10
def _snake_case ( self : Any , __lowerCamelCase : List[Any] ):
with pytest.raises(__lowerCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def _snake_case ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("data" ) / f"test.json.{extension}"
SCREAMING_SNAKE_CASE = str(shared_datadir / f"test_file.json.{extension}" )
JsonDatasetWriter(__lowerCamelCase , __lowerCamelCase , compression=__lowerCamelCase ).write()
with fsspec.open(__lowerCamelCase , "rb" , compression="infer" ) as f:
SCREAMING_SNAKE_CASE = f.read()
with fsspec.open(__lowerCamelCase , "rb" , compression="infer" ) as f:
SCREAMING_SNAKE_CASE = f.read()
assert exported_content == original_content | 698 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __a ( A__ : str=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE = subparsers.add_parser("test" )
else:
SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=A__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
def __a ( A__ : Tuple ):
SCREAMING_SNAKE_CASE = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
SCREAMING_SNAKE_CASE = script_name
else:
SCREAMING_SNAKE_CASE = F"--config_file={args.config_file} {script_name}"
SCREAMING_SNAKE_CASE = ["accelerate-launch"] + test_args.split()
SCREAMING_SNAKE_CASE = execute_subprocess_async(A__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __a ( ):
SCREAMING_SNAKE_CASE = test_command_parser()
SCREAMING_SNAKE_CASE = parser.parse_args()
test_command(A__ )
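# Equivalent CLI invocations (sketch; the config path shown is hypothetical):
#   accelerate test
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml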
if __name__ == "__main__":
main() | 698 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : str = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__A : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 698 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Optional[int] = logging.get_logger(__name__)
__A : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A : Tuple = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__A : Dict = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __a ( ):
SCREAMING_SNAKE_CASE = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE = bs[:]
SCREAMING_SNAKE_CASE = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
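# Example mappings produced above: printable ASCII maps to itself, while bytes
# outside the kept ranges are shifted past 255, e.g. the space byte 32 maps to
# chr(32 + 256) == "Ġ", the marker GPT-2-style BPE uses for a leading space.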
def __a ( A__ : Optional[int] ):
SCREAMING_SNAKE_CASE = set()
SCREAMING_SNAKE_CASE = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE = char
return pairs
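# Example (illustrative): for the symbol tuple ("l", "o", "w") the function
# above returns the pair set {("l", "o"), ("o", "w")}.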
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]="replace" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Tuple , ):
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE = bytes_to_unicode()
SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _snake_case ( self : Dict ):
return len(self.encoder )
def _snake_case ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = word
return word
def _snake_case ( self : str , __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : List[str] , __lowerCamelCase : str ):
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple ):
return self.decoder.get(__lowerCamelCase )
    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: "Optional[str]" = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation"):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to prefix the text with a space, as is done inside Blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses already contain the space prefix.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
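# --- Usage sketch (added for illustration; not part of the original file) ---
# Assuming the class above is the BlenderbotTokenizer shipped with `transformers`,
# a typical round trip would look like this (checkpoint name is the public one):
#
#   from transformers import BlenderbotTokenizer
#   tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
#   ids = tok("Hello there!").input_ids   # byte-level BPE ids, with eos appended
#   print(tok.decode(ids))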
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__A : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__snake_case )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : List[str] , *__lowerCamelCase : Any , **__lowerCamelCase : int ):
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
def __call__( self : Any , __lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowerCamelCase : Any ):
return super().__call__(__lowerCamelCase , **__lowerCamelCase )
    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        #  parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        #  the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        #  in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
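# --- Usage sketch (added for illustration; not part of the original file) ---
# Assuming this class backs the "image-to-text" pipeline task in `transformers`:
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
#   # -> [{'generated_text': '...caption...'}]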
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int):
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(F"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main() | 698 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / F"{split}.source", data_dir / F"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(F"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / F"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / F"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F"{split}.source", data_dir / F"{split}.target"
        shutil.copyfile(src_path, save_path / F"{split}.source")
        shutil.copyfile(tgt_path, save_path / F"{split}.target")
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli() | 698 |
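# --- Example invocation (added for illustration; not part of the original script) ---
# The CLI above would typically be run as, e.g.:
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 512 --data_dir cnn_dm --save_path cnn_dm_packed
#
# (the script/file name is an assumption; only the flags come from the parser above)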
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
import math
def is_prime(number: int):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1):
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 |
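# --- Note (added for illustration) ---
# This is Project Euler problem 58 (spiral primes): `solution(ratio)` grows the
# square spiral layer by layer and returns the first side length at which the
# fraction of primes on the two diagonals drops below `ratio`. With the default
# ratio of 0.1, the published answer is 26241 (hedged: quoted, not re-derived here):
#
#   solution()  # -> 26241, may take a while to compute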
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray):
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 |
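# --- Usage sketch (added for illustration; function name as restored above) ---
# A minimal example of the Doolittle-style decomposition, without pivoting:
if __name__ == "__main__":
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    # lower @ upper reconstructs the input (up to floating-point error)
    assert np.allclose(lower @ upper, matrix)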
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    train_set = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in train_set:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 698 | 1 |
def nand_gate(input_a: int, input_b: int):
    return int((input_a, input_b).count(0) != 0)
def test_nand_gate():
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1)) | 698 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Dict = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "resnet"
lowerCamelCase__ = ["basic", "bottleneck"]
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = version.parse("1.11" )
@property
    def inputs(self):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
    def atol_for_validation(self):
return 1e-3 | 698 | 1 |
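# --- Usage sketch (added for illustration; not part of the original file) ---
# Assuming the first class above is the ResNetConfig shipped with `transformers`:
#
#   from transformers import ResNetConfig, ResNetModel
#   config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
#   model = ResNetModel(config)   # randomly initialized resnet-50-style backbone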
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
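# --- Note (added for illustration) ---
# These parametrized tests exercise `TextDatasetReader` against a small plain-text
# `text_path` fixture; they would normally be collected and run with something like:
#
#   pytest tests/io/test_text.py
#
# (the exact test-file path is an assumption)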
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__A : str = logging.get_logger(__name__)
__A : Optional[Any] = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "imagegpt"
lowerCamelCase__ = ["past_key_values"]
lowerCamelCase__ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@property
    def inputs(self):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
    def generate_dummy_inputs( self , preprocessor: "FeatureExtractionMixin" , batch_size: int = 1 , seq_length: int = -1 , is_pair: bool = False , framework: "Optional[TensorType]" = None , num_channels: int = 3 , image_width: int = 32 , image_height: int = 32 , ):
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
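# --- Usage sketch (added for illustration; not part of the original file) ---
# Assuming the first class above is the ImageGPTConfig shipped with `transformers`:
#
#   from transformers import ImageGPTConfig, ImageGPTModel
#   config = ImageGPTConfig()      # 512 + 1 vocab (color clusters + sos), 32*32 positions
#   model = ImageGPTModel(config)  # randomly initialized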
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/transformers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")
    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return F"[{imports}]"
        keys = [part.strip().replace("\"", "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(keys)]) + "]"
    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        #     key: [
        #         "object1",
        #         "object2",
        #         ...
        #     ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        #     key: [
        #         "object1", "object2", ...
        #     ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace("\"", "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([F"\"{k}\"" for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(F"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(F"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
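# --- Example invocation (added for illustration; not part of the original script) ---
# Check that every `__init__.py` under src/transformers has sorted
# `_import_structure` blocks, without rewriting anything:
#
#   python utils/custom_init_isort.py --check_only
#
# (the utils/ path is an assumption; the flag comes from the parser above)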
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@slow
@require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )
        # start training
        trainer.train()
def check_bouncy(num: int):
    if not isinstance(num, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_num = str(num)
    sorted_str_num = "".join(sorted(str_num))
    return sorted_str_num != str_num and sorted_str_num[::-1] != str_num
def solution(percent: float = 99):
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'{solution(9_9)}') | 698 |
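# --- Note (added for illustration) ---
# This is Project Euler problem 112: `solution(99)` returns the least number at
# which the proportion of bouncy numbers first reaches 99% -- 1587000 per the
# published answer (hedged: quoted, not re-derived here).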
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester :
'''simple docstring'''
def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : int=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Tuple=[128, 256, 384] , __lowerCamelCase : int=[4, 6, 8] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : List[str]=[16, 16, 16] , __lowerCamelCase : int=0 , __lowerCamelCase : List[Any]=[2, 2, 2] , __lowerCamelCase : List[str]=[2, 2, 2] , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : int=2 , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = kernel_size
SCREAMING_SNAKE_CASE = stride
SCREAMING_SNAKE_CASE = padding
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = key_dim
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = attention_ratio
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = initializer_range
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE = LevitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
SCREAMING_SNAKE_CASE = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _snake_case ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = LevitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
def _snake_case ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self : Any ):
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _snake_case ( self : Tuple ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def _snake_case ( self : Tuple ):
pass
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def check_hidden_states_output(__lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = len(self.model_tester.depths ) + 1
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : int ):
pass
def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int=False ):
SCREAMING_SNAKE_CASE = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _snake_case ( self : List[Any] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ):
SCREAMING_SNAKE_CASE = problem_type["title"]
SCREAMING_SNAKE_CASE = problem_type["num_labels"]
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
SCREAMING_SNAKE_CASE = inputs["labels"].to(problem_type["dtype"] )
                        # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                        # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                        # they have the same size.", which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list:
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def _snake_case ( self : List[Any] ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = LevitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __a ( ):
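    # Load the COCO two-cats fixture image used by the slow integration tests below.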
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : Dict ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) | 698 | 1 |
import numpy as np
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[Any] ):
SCREAMING_SNAKE_CASE = (0, 0)
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
def __eq__( self : List[str] , __lowerCamelCase : str ):
return self.position == cell.position
def _snake_case ( self : Optional[int] ):
print(self.position )
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCamelCase : Tuple=(5, 5) ):
SCREAMING_SNAKE_CASE = np.zeros(__lowerCamelCase )
SCREAMING_SNAKE_CASE = world_size[0]
SCREAMING_SNAKE_CASE = world_size[1]
def _snake_case ( self : Optional[int] ):
print(self.w )
def _snake_case ( self : int , __lowerCamelCase : Tuple ):
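        # Collect the in-bounds neighbours of the given cell: try all 8 surrounding
        # offsets and record the current cell as each neighbour's parent.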
SCREAMING_SNAKE_CASE = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
SCREAMING_SNAKE_CASE = cell.position[0]
SCREAMING_SNAKE_CASE = cell.position[1]
SCREAMING_SNAKE_CASE = []
for n in neughbour_cord:
SCREAMING_SNAKE_CASE = current_x + n[0]
SCREAMING_SNAKE_CASE = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
SCREAMING_SNAKE_CASE = Cell()
SCREAMING_SNAKE_CASE = (x, y)
SCREAMING_SNAKE_CASE = cell
neighbours.append(__lowerCamelCase )
return neighbours
def __a ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : int ):
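    # A* search: repeatedly expand the open cell with the smallest f = g + h,
    # then walk parent links back from the goal to reconstruct the path.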
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
_open.append(A__ )
while _open:
SCREAMING_SNAKE_CASE = np.argmin([n.f for n in _open] )
SCREAMING_SNAKE_CASE = _open[min_f]
_closed.append(_open.pop(A__ ) )
if current == goal:
break
for n in world.get_neigbours(A__ ):
for c in _closed:
if c == n:
continue
SCREAMING_SNAKE_CASE = current.g + 1
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = n.position
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = goal.position
            # Heuristic: squared Euclidean distance from the neighbour to the goal
            # (the collapsed `(ya - ya) ** 2 + (xa - xa) ** 2` form always evaluated to 0).
            SCREAMING_SNAKE_CASE = (goal.position[1] - n.position[1]) ** 2 + (goal.position[0] - n.position[0]) ** 2
SCREAMING_SNAKE_CASE = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(A__ )
SCREAMING_SNAKE_CASE = []
while current.parent is not None:
path.append(current.position )
SCREAMING_SNAKE_CASE = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
__A : int = Gridworld()
# Start position and goal
__A : List[Any] = Cell()
__A : Optional[int] = (0, 0)
__A : List[Any] = Cell()
__A : Optional[Any] = (4, 4)
print(f'path from {start.position} to {goal.position}')
__A : Optional[int] = astar(world, start, goal)
    # Mark the found path on the grid (just for visualisation).
for i in s:
__A : Optional[int] = 1
print(world.w) | 698 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
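    # Mocks a model whose forward(...) takes input_ids, token_type_ids and attention_mask
    # contiguously (exercised by the ensure_valid_input tests below).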
def _snake_case ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
return None
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
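    # Mocks a forward(...) with an extra, never-provided argument interleaved between the
    # tokenizer outputs (like GPT-2's `past`).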
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : int ):
return None
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _snake_case ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase )
@require_torch
@slow
def _snake_case ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase )
@require_torch
@slow
def _snake_case ( self : Optional[int] ):
from transformers import BertModel
SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(__lowerCamelCase ) )
vocab_file.flush()
SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(__lowerCamelCase ) ) )
model.save_pretrained(__lowerCamelCase )
self._test_export(__lowerCamelCase , "pt" , 12 , __lowerCamelCase )
@require_tf
@slow
def _snake_case ( self : Any ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = quantize(Path(__lowerCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def _snake_case ( self : Any ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = quantize(__lowerCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Tuple=None , **__lowerCamelCase : List[str] ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE = Path(__lowerCamelCase ).joinpath("model.onnx" )
                # Remove the folder if it exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
return path
except Exception as e:
self.fail(__lowerCamelCase )
@require_torch
@require_tokenizers
@slow
def _snake_case ( self : Dict ):
from transformers import BertModel
SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def _snake_case ( self : int ):
from transformers import TFBertModel
SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "tf" )
def _snake_case ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = infer_shapes(__lowerCamelCase , __lowerCamelCase )
# Assert all variables are present
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __lowerCamelCase )
self.assertSequenceEqual(variable_names[3:] , __lowerCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"]
SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , __lowerCamelCase , __lowerCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__lowerCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__lowerCamelCase ) , set(__lowerCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
        # Generated args are interleaved with other args (for instance the parameter "past" in GPT-2)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , __lowerCamelCase , __lowerCamelCase )
        # Should have exactly one arg (everything before the argument that was not provided, "some_other_args")
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() ) | 698 | 1 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str=13 , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Tuple=99 , __lowerCamelCase : Optional[Any]=64 , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : Union[str, Any]=5 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Dict=37 , __lowerCamelCase : Tuple="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : Any=16 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : str=4 , __lowerCamelCase : Optional[int]=None , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = embedding_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : Optional[Any] ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def _snake_case ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE = MegatronBertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self : str , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = MegatronBertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = MegatronBertForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = MegatronBertForNextSentencePrediction(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _snake_case ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = MegatronBertForPreTraining(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , next_sentence_label=__lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _snake_case ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = MegatronBertForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MegatronBertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MegatronBertForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self : Any , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = MegatronBertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
# test_resize_embeddings = False
lowerCamelCase__ = False
def _snake_case ( self : int , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple=False ):
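        # Models in the pretraining mapping also need dummy `labels` and `next_sentence_label`
        # tensors so that the forward pass can compute a loss.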
SCREAMING_SNAKE_CASE = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = MegatronBertModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__lowerCamelCase )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowerCamelCase )
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowerCamelCase )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowerCamelCase )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowerCamelCase )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowerCamelCase )
def __a ( A__ : int ):
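    # Helper: wrap a list of token ids in a torch.long tensor on the target device.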
return torch.tensor(
A__ , dtype=torch.long , device=A__ , )
__A : int = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip("Model is not available." )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
SCREAMING_SNAKE_CASE = os.path.join(os.environ["MYDIR"] , __lowerCamelCase )
SCREAMING_SNAKE_CASE = MegatronBertModel.from_pretrained(__lowerCamelCase )
model.to(__lowerCamelCase )
model.half()
SCREAMING_SNAKE_CASE = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3 ):
for jj in range(3 ):
SCREAMING_SNAKE_CASE = output[0, ii, jj]
SCREAMING_SNAKE_CASE = expected[3 * ii + jj]
SCREAMING_SNAKE_CASE = "ii={} jj={} a={} b={}".format(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
self.assertTrue(math.isclose(__lowerCamelCase , __lowerCamelCase , rel_tol=__lowerCamelCase , abs_tol=__lowerCamelCase ) , msg=__lowerCamelCase ) | 698 |
from manim import *
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def _snake_case ( self : List[Any] ):
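        # Draw the CPU / GPU / model memory blocks, then animate a checkpoint shard
        # being loaded and its weights moved into CPU memory.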
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("CPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("GPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Model" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
rect.set_stroke(__lowerCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCamelCase , buff=0.0 )
self.add(__lowerCamelCase )
cpu_targs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , aligned_edge=__lowerCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
SCREAMING_SNAKE_CASE = MarkupText(
f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCamelCase ) , Write(__lowerCamelCase ) )
self.play(Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 )
target.move_to(__lowerCamelCase )
first_animations.append(GrowFromCenter(__lowerCamelCase , run_time=1 ) )
SCREAMING_SNAKE_CASE = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) )
self.play(*__lowerCamelCase )
self.play(*__lowerCamelCase )
self.wait() | 698 | 1 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __a ( A__ : List[str] ):
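    # Drop fairseq bookkeeping keys (version markers, positional-embedding float tensors,
    # the decoder output projection) that have no counterpart in the HF checkpoint.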
SCREAMING_SNAKE_CASE = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def __a ( A__ : Dict ):
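    # Remap fairseq parameter names (the `transformer_layers` and `subsample` prefixes)
    # onto their HF equivalents.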
SCREAMING_SNAKE_CASE = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
SCREAMING_SNAKE_CASE = s_dict.pop(A__ )
elif "subsample" in key:
SCREAMING_SNAKE_CASE = s_dict.pop(A__ )
def __a ( A__ : Any ):
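    # Build an un-biased nn.Linear whose weight is tied to the embedding matrix,
    # for use as the LM head when input/output embeddings are shared.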
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape
SCREAMING_SNAKE_CASE = nn.Linear(A__ , A__ , bias=A__ )
SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def __a ( A__ : List[str] , A__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = torch.load(A__ , map_location="cpu" )
SCREAMING_SNAKE_CASE = mam_aaa["args"]
SCREAMING_SNAKE_CASE = mam_aaa["model"]
SCREAMING_SNAKE_CASE = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(A__ )
rename_keys(A__ )
SCREAMING_SNAKE_CASE = state_dict["decoder.embed_tokens.weight"].shape[0]
SCREAMING_SNAKE_CASE = args.share_decoder_input_output_embed
SCREAMING_SNAKE_CASE = [int(A__ ) for i in args.conv_kernel_sizes.split("," )]
SCREAMING_SNAKE_CASE = SpeechaTextConfig(
vocab_size=A__ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(A__ ) , conv_channels=args.conv_channels , conv_kernel_sizes=A__ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=A__ , num_beams=5 , max_length=200 , use_cache=A__ , decoder_start_token_id=2 , early_stopping=A__ , )
SCREAMING_SNAKE_CASE = SpeechaTextForConditionalGeneration(A__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.model.load_state_dict(A__ , strict=A__ )
if len(A__ ) > 0 and not set(A__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F" but all the following weights are missing {missing}" )
if tie_embeds:
SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
SCREAMING_SNAKE_CASE = lm_head_weights
model.save_pretrained(A__ )
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__A : Optional[int] = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path) | 698 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def _snake_case ( *__lowerCamelCase : Any , **__lowerCamelCase : Any ):
pass
@is_pipeline_test
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] )
        # The floating-point scores are so close that we run into floating-point error, so the ordering is not
        # guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__lowerCamelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
] , )
@require_tf
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
] , )
@slow
@require_torch
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , ) | 698 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
SCREAMING_SNAKE_CASE = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = f"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split()
SCREAMING_SNAKE_CASE = [sys.executable] + distributed_args
execute_subprocess_async(__lowerCamelCase , env=os.environ.copy() ) | 698 |
__A : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355_818,
}
def __a ( A__ : str , A__ : str , A__ : float ):
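    # Every factor is expressed relative to one joule, so conversion goes
    # value -> joules -> target unit (e.g. 1000 joule -> 1.0 kilojoule).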
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
SCREAMING_SNAKE_CASE = (
F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
F"Valid values are: {', '.join(A__ )}"
)
raise ValueError(A__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : Optional[Any] = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "markuplm"
def __init__( self : str , __lowerCamelCase : Any=30522 , __lowerCamelCase : str=768 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : Dict=12 , __lowerCamelCase : List[str]=3072 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Tuple=512 , __lowerCamelCase : str=2 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : List[Any]=1e-12 , __lowerCamelCase : int=0 , __lowerCamelCase : Any=0 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Dict=256 , __lowerCamelCase : List[Any]=1024 , __lowerCamelCase : Optional[Any]=216 , __lowerCamelCase : str=1001 , __lowerCamelCase : Any=32 , __lowerCamelCase : List[str]=50 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Dict , ):
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = classifier_dropout
# additional properties
SCREAMING_SNAKE_CASE = max_depth
SCREAMING_SNAKE_CASE = max_xpath_tag_unit_embeddings
SCREAMING_SNAKE_CASE = max_xpath_subs_unit_embeddings
SCREAMING_SNAKE_CASE = tag_pad_id
SCREAMING_SNAKE_CASE = subs_pad_id
SCREAMING_SNAKE_CASE = xpath_unit_hidden_size | 698 |
from collections import deque
from .hash_table import HashTable
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Optional[int] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[Any] ):
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ):
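        # Separate chaining: keep every value hashed to this slot in a per-slot deque, newest value first.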
SCREAMING_SNAKE_CASE = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.values[key]
def _snake_case ( self : Union[str, Any] ):
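        # Average spare capacity per slot, scaled by the charge factor.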
return (
sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0
):
return key
return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase ) | 698 | 1 |
from manim import *
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
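        # Draw the CPU / GPU / model / disk memory blocks, then animate the checkpoint
        # weights being written out to disk and the in-memory copy freed.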
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("CPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("GPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Model" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
rect.set_stroke(__lowerCamelCase )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__lowerCamelCase , buff=0.0 )
self.add(__lowerCamelCase )
model_cpu_arr.append(__lowerCamelCase )
self.add(*__lowerCamelCase , *__lowerCamelCase , *__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 )
target.move_to(__lowerCamelCase )
ckpt_arr.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__lowerCamelCase )
self.add(*__lowerCamelCase , *__lowerCamelCase )
SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Disk" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__lowerCamelCase , run_time=3 ) , Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) )
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) )
self.play(*__lowerCamelCase )
self.play(FadeOut(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCamelCase , run_time=3 ) )
self.play(
FadeOut(__lowerCamelCase , __lowerCamelCase , *__lowerCamelCase , *__lowerCamelCase ) , )
self.wait() | 698 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : int = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "gpt_neo"
lowerCamelCase__ = ["past_key_values"]
lowerCamelCase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : str , __lowerCamelCase : Dict=50257 , __lowerCamelCase : Tuple=2048 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : int=24 , __lowerCamelCase : int=[[["global", "local"], 12]] , __lowerCamelCase : int=16 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=256 , __lowerCamelCase : Tuple="gelu_new" , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=50256 , __lowerCamelCase : Optional[int]=50256 , **__lowerCamelCase : Dict , ):
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_layers
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = window_size
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_dropout
SCREAMING_SNAKE_CASE = embed_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = attention_types
SCREAMING_SNAKE_CASE = self.expand_attention_types_params(__lowerCamelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
f"`config.num_layers = {self.num_layers}`. "
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
@staticmethod
def _snake_case ( __lowerCamelCase : Dict ):
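        # Expand the compact [["global", "local"], 12]-style notation into a flat, per-layer
        # list of attention types.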
SCREAMING_SNAKE_CASE = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def __a ( A__ : str , A__ : List[Any] , A__ : List[str] , A__ : Union[str, Any] ):
import torch
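    # Re-implementation of torch.Tensor.unfold: slice the input along `dimension` into windows
    # of length `size`, one window every `step` elements, with the window axis moved to the end.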
SCREAMING_SNAKE_CASE = input.size()
SCREAMING_SNAKE_CASE = len(A__ )
SCREAMING_SNAKE_CASE = shape[dimension]
SCREAMING_SNAKE_CASE = torch.arange(0 , A__ , A__ )
SCREAMING_SNAKE_CASE = torch.div(sizedim - size , A__ , rounding_mode="floor" ) + 1
SCREAMING_SNAKE_CASE = torch.arange(A__ ) + low_indices[:min_length][:, None]
SCREAMING_SNAKE_CASE = [slice(A__ )] * rank
SCREAMING_SNAKE_CASE = indices
SCREAMING_SNAKE_CASE = input[s]
SCREAMING_SNAKE_CASE = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(A__ )
def __a ( A__ : Union[str, Any] , A__ : Optional[int] ):
import torch
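    # Find the largest divisor of the sequence length that is smaller than the window size,
    # and return it together with the resulting number of blocks.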
SCREAMING_SNAKE_CASE = torch.arange(1 , A__ )
SCREAMING_SNAKE_CASE = torch.remainder(A__ , A__ )
SCREAMING_SNAKE_CASE = remainders == 0
SCREAMING_SNAKE_CASE = candidates[divisor_indices]
SCREAMING_SNAKE_CASE = torch.max(A__ )
return largest_divisor, torch.div(A__ , A__ , rounding_mode="floor" )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@property
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _snake_case ( self : Optional[int] ):
return self._config.num_heads
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE = super(__lowerCamelCase , self ).generate_dummy_inputs(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
        # We need to order the inputs in the way they appear in forward()
SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE = seqlen + 2
SCREAMING_SNAKE_CASE = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE = common_inputs["attention_mask"]
if self.use_past:
SCREAMING_SNAKE_CASE = ordered_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self : Optional[int] ):
return 13 | 698 | 1 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : Dict=2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Dict=64 , __lowerCamelCase : Union[str, Any]=None ):
SCREAMING_SNAKE_CASE = np.random.default_rng(__lowerCamelCase )
SCREAMING_SNAKE_CASE = length
SCREAMING_SNAKE_CASE = rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Tuple ):
return self.length
def __getitem__( self : List[Any] , __lowerCamelCase : Optional[Any] ):
return {"x": self.x[i], "y": self.y[i]}
class _SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
    def __init__( self : List[str] , a : Dict=0 , b : Tuple=0 , double_output : Union[str, Any]=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def _snake_case ( self : int , x : str=None ):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class _SCREAMING_SNAKE_CASE ( torch.nn.Module ):
'''simple docstring'''
    def __init__( self : str , a : Dict=0 , b : int=0 , double_output : Dict=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def _snake_case ( self : List[Any] , x : Dict=None ):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
            self.first_batch = False
        return x * self.a + self.b
def __a ( accelerator , batch_size : int = 16 ):
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv" , data_files=data_files )
    label_list = datasets["train"].unique("label" )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None , padding="max_length" )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["sentence1", "sentence2", "label"] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
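# Usage sketch (assumes an `accelerate.Accelerator`; the `accelerator` argument is only
# consulted for the TPU padding check above):
#   from accelerate import Accelerator
#   train_dataloader, eval_dataloader = __a(Accelerator() )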
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A : Any = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
__A : List[str] = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
    from .utils import rich
import cmath
import math
def __a ( voltage : float , current : float , voltage_angle : float , current_angle : float ):
    voltage_angle = math.radians(voltage_angle )
    current_angle = math.radians(current_angle )
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage , voltage_angle )
    current_rect = cmath.rect(current , current_angle )
    # Calculate apparent power
    return voltage_rect * current_rect
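# Example (hypothetical values): 100 V at 0 degrees and 5 A at -30 degrees:
#   __a(100, 5, 0, -30) -> approx. (433.01 - 250.00j) volt-amperes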
if __name__ == "__main__":
import doctest
    doctest.testmod()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
__A : int = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "bert"
    def __init__( self : Union[str, Any] , vocab_size : Tuple=30522 , hidden_size : Any=768 , num_hidden_layers : Dict=12 , num_attention_heads : Optional[Any]=12 , intermediate_size : Any=3072 , hidden_act : Any="gelu" , hidden_dropout_prob : Union[str, Any]=0.1 , attention_probs_dropout_prob : List[str]=0.1 , max_position_embeddings : Dict=512 , type_vocab_size : Optional[Any]=2 , initializer_range : List[Any]=0.02 , layer_norm_eps : Any=1e-12 , pad_token_id : Optional[Any]=0 , position_embedding_type : List[str]="absolute" , use_cache : Union[str, Any]=True , classifier_dropout : Tuple=None , **kwargs : Optional[Any] , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
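        # Note: when `classifier_dropout` is left as None, downstream BERT classification
        # heads typically fall back to `hidden_dropout_prob` (behavior assumed from the
        # reference models).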
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@property
    def inputs( self : Dict ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
            ] )
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys( state_dict , expert_idx=None ):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0" , F"ffn.experts.expert_{expert_idx}" )
            else:
                key = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2." , ".ffn.fc2." )
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1." , ".ffn.fc1." )
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn." , ".cross_attention." )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm" , "ff_layer_norm" )
        new_dict[key] = state_dict[old_key]
    return new_dict
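# Illustrative mapping (assumed key names): with expert_idx=3,
#   "layers.0.moe_layer.experts.0.fc1.weight" -> "layers.0.ffn.experts.expert_3.fc1.weight"
# (the plain fc1/fc2 rules are skipped for expert weights because "experts" is in the key).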
def shard_on_the_fly( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name : str = WEIGHTS_NAME ):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F"-rank-{expert}.pt"
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )["model"]
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace(".bin" , F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(".bin" , F"-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin" )
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
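# The shards are first written as "...-000NN-of-???.bin" and renamed once the final shard
# count is known; the returned index mirrors the Hugging Face sharded-checkpoint format:
#   {"metadata": {"total_size": ...}, "weight_map": {parameter_name: shard_file}}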
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__A : Optional[int] = parser.parse_args()
__A , __A : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
__A : Any = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
__A : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
    model.save_pretrained(args.pytorch_dump_folder_path)
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
    def __init__( self : List[Any] , *args : List[Any] , eval_examples : Optional[int]=None , post_process_function : Optional[Any]=None , **kwargs : Optional[Any] ):
        super().__init__(*args , **kwargs )
SCREAMING_SNAKE_CASE = eval_examples
SCREAMING_SNAKE_CASE = post_process_function
    def evaluate( self : Optional[int] , eval_dataset : str=None , eval_examples : List[str]=None , ignore_keys : Optional[int]=None , metric_key_prefix : str = "eval" ):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"{metric_key_prefix}_" ):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict( self : str , predict_dataset : Optional[int] , predict_examples : Union[str, Any] , ignore_keys : Optional[int]=None , metric_key_prefix : str = "test" ):
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , "predict" )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"{metric_key_prefix}_" ):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self : Union[str, Any] ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
        processor = BlipProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self : Dict , **kwargs : Any ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self : List[Any] , **kwargs : Optional[Any] ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self : Tuple ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = image_processor(__lowerCamelCase , return_tensors="np" )
SCREAMING_SNAKE_CASE = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def __a ( A__ : str = "dhaka" , A__ : int = 5 ):
SCREAMING_SNAKE_CASE = min(A__ , 50 ) # Prevent abuse!
SCREAMING_SNAKE_CASE = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
SCREAMING_SNAKE_CASE = requests.get("https://www.google.com/search" , params=A__ , headers=A__ )
SCREAMING_SNAKE_CASE = BeautifulSoup(html.text , "html.parser" )
SCREAMING_SNAKE_CASE = "".join(
re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
SCREAMING_SNAKE_CASE = json.dumps(A__ )
SCREAMING_SNAKE_CASE = json.loads(A__ )
SCREAMING_SNAKE_CASE = re.findall(
R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , A__ , )
if not matched_google_image_data:
return 0
SCREAMING_SNAKE_CASE = re.sub(
R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(A__ ) , )
SCREAMING_SNAKE_CASE = re.findall(
R"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , A__ , )
for index, fixed_full_res_image in enumerate(A__ ):
if index >= max_images:
return index
SCREAMING_SNAKE_CASE = bytes(A__ , "ascii" ).decode(
"unicode-escape" )
SCREAMING_SNAKE_CASE = bytes(A__ , "ascii" ).decode(
"unicode-escape" )
SCREAMING_SNAKE_CASE = urllib.request.build_opener()
SCREAMING_SNAKE_CASE = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(A__ )
SCREAMING_SNAKE_CASE = F"query_{query.replace(' ' , '_' )}"
if not os.path.exists(A__ ):
os.makedirs(A__ )
urllib.request.urlretrieve( # noqa: S310
A__ , F"{path_name}/original_size_img_{index}.jpg" )
return index
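# Usage sketch (network access required; writes into ./query_<term>/):
#   download_images_from_google_query("cats", 3)  # downloads up to 3 images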
if __name__ == "__main__":
try:
__A : Any = download_images_from_google_query(sys.argv[1])
print(f'{image_count} images were downloaded to disk.')
except IndexError:
print('Please provide a search term.')
        raise
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def _snake_case ( *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ):
pass
def __a ( A__ : str ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__A : Tuple = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline( self : Optional[Any] , model : Union[str, Any] , tokenizer : Dict , processor : Optional[Any] ):
        SCREAMING_SNAKE_CASE = pipeline(
            "document-question-answering" , model=model , tokenizer=tokenizer , image_processor=processor )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
SCREAMING_SNAKE_CASE = "What is the placebo?"
SCREAMING_SNAKE_CASE = [
{
"image": load_image(__lowerCamelCase ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
    def run_pipeline_test( self : List[Any] , dqa_pipeline : int , examples : int ):
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
[
{"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )},
{"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "How many cats are there?"
SCREAMING_SNAKE_CASE = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
# We can optionnally pass directly the words and bounding boxes
SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , words=__lowerCamelCase , boxes=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , max_seq_len=50 , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _snake_case ( self : List[Any] ):
        pass
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = (DPMSolverSinglestepScheduler,)
lowerCamelCase__ = (("num_inference_steps", 2_5),)
    def get_scheduler_config( self : str , **kwargs : Any ):
        config = {
"num_train_timesteps": 1000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
        config.update(**kwargs )
return config
    def check_over_configs( self : List[Any] , time_step : Union[str, Any]=0 , **config : str ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _snake_case ( self : Any ):
pass
    def check_over_forward( self : List[str] , time_step : str=0 , **forward_kwargs : Optional[int] ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self : Dict , scheduler : str=None , **config : Union[str, Any] ):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
def _snake_case ( self : Any ):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_574 ) < 1e-3
def _snake_case ( self : List[str] ):
for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def _snake_case ( self : Any ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1e-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def _snake_case ( self : Optional[Any] ):
        self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type="dpmsolver++" , solver_order=order , solver_type=solver_type , )
def _snake_case ( self : Tuple ):
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def _snake_case ( self : Dict ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def _snake_case ( self : Optional[int] ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def _snake_case ( self : Union[str, Any] ):
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def _snake_case ( self : Union[str, Any] ):
        self.check_over_configs(variance_type=None )
self.check_over_configs(variance_type="learned_range" )
def _snake_case ( self : str ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def _snake_case ( self : Any ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def _snake_case ( self : List[Any] ):
        sample = self.full_loop(use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_mean.item() - 0.2_248 ) < 1e-3
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = self.full_loop(prediction_type="v_prediction" )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.1_453 ) < 1e-3
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=__lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_mean.item() - 0.0_649 ) < 1e-3
def _snake_case ( self : Optional[int] ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "facebook/bart-large-mnli"
lowerCamelCase__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
lowerCamelCase__ = "text_classifier"
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSequenceClassification
lowerCamelCase__ = ["text", ["text"]]
lowerCamelCase__ = ["text"]
    def setup( self : Optional[Any] ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
    def encode( self : List[Any] , text : Union[str, Any] , labels : List[Any] ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )
    def decode( self : str , outputs : Optional[int] ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
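# Usage sketch (hypothetical; the tool is normally instantiated and set up by the
# transformers agents runtime):
#   tool = _SCREAMING_SNAKE_CASE()
#   tool("The weather is lovely today", ["positive", "negative"])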
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A : Optional[Any] = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    __A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __a ( A__ : str=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE = subparsers.add_parser("test" )
else:
SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=A__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
def __a ( A__ : Tuple ):
SCREAMING_SNAKE_CASE = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
SCREAMING_SNAKE_CASE = script_name
else:
SCREAMING_SNAKE_CASE = F"--config_file={args.config_file} {script_name}"
SCREAMING_SNAKE_CASE = ["accelerate-launch"] + test_args.split()
SCREAMING_SNAKE_CASE = execute_subprocess_async(A__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __a ( ):
SCREAMING_SNAKE_CASE = test_command_parser()
SCREAMING_SNAKE_CASE = parser.parse_args()
test_command(A__ )
if __name__ == "__main__":
    main()
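# This module backs the `accelerate test` CLI subcommand: it launches the bundled
# test_script.py via `accelerate-launch` under the current distributed configuration
# (optionally using --config_file).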
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order( df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition_data = df.where(F"SPARK_PARTITION_ID() = {part_id}" ).collect()
        for row_idx, row in enumerate(partition_data ):
            expected_row_ids_and_row_dicts.append((F"{part_id}_{row_idx}", row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __a ( ):
SCREAMING_SNAKE_CASE = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
SCREAMING_SNAKE_CASE = spark.range(100 ).repartition(1 )
SCREAMING_SNAKE_CASE = Spark(A__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __a ( ):
SCREAMING_SNAKE_CASE = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
SCREAMING_SNAKE_CASE = spark.range(10 ).repartition(2 )
SCREAMING_SNAKE_CASE = [1, 0]
SCREAMING_SNAKE_CASE = _generate_iterable_examples(A__ , A__ ) # Reverse the partitions.
SCREAMING_SNAKE_CASE = _get_expected_row_ids_and_row_dicts_for_partition_order(A__ , A__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __a ( ):
SCREAMING_SNAKE_CASE = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
SCREAMING_SNAKE_CASE = spark.range(10 ).repartition(1 )
SCREAMING_SNAKE_CASE = SparkExamplesIterable(A__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A__ ):
assert row_id == F"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __a ( ):
SCREAMING_SNAKE_CASE = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
SCREAMING_SNAKE_CASE = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
        SCREAMING_SNAKE_CASE = lambda x : x.reverse()
SCREAMING_SNAKE_CASE = _get_expected_row_ids_and_row_dicts_for_partition_order(A__ , [2, 1, 0] )
SCREAMING_SNAKE_CASE = SparkExamplesIterable(A__ ).shuffle_data_sources(A__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A__ ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
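# Hedged sketch of the shard-count arithmetic the two repartition tests above
# rely on: each pyarrow int64 row occupies 8 bytes, so the partition count is
# roughly row_count * 8 / max_shard_size, capped at one row per partition.
import math


def expected_partitions(row_count, row_bytes, max_shard_size):
    return min(row_count, math.ceil(row_count * row_bytes / max_shard_size))


assert expected_partitions(100, 8, 16) == 50  # the max_shard_size=16 test above
assert expected_partitions(100, 8, 1) == 100  # the max_shard_size=1 test above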
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Optional[int] = logging.get_logger(__name__)
__A : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A : Tuple = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__A : Dict = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
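# Illustrative examples of the mapping above (hand-checked, not part of the
# original module): printable bytes map to themselves, while unprintable bytes
# are shifted past 0xFF so BPE never operates on raw whitespace/control bytes.
#
#   table = bytes_to_unicode()
#   table[ord("A")] == "A"      # printable byte, unchanged
#   table[0] == chr(2**8)       # first unprintable byte -> "Ā"
#   table[ord(" ")] == "Ġ"      # space becomes the familiar "Ġ" marker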
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
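# Illustrative example (hand-checked): for the word tuple
# ("h", "e", "l", "l", "o") the function above returns the adjacent pairs
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, which bpe() then ranks
# against the merge table to pick the next merge.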
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size( self ):
        return len(self.encoder )

    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token : str ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text : str ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self , token : str ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index : int ):
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
return vocab_file, merge_file
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids( self , conversation : "Conversation" ):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = " ".join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
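# Hedged sketch (hypothetical numbers) of the trimming rule above: when the
# encoded conversation exceeds model_max_length, only the most recent tokens
# survive, e.g. with model_max_length = 4:
#
#   input_ids = [10, 11, 12, 13, 14, 15]
#   input_ids[-4:] == [12, 13, 14, 15]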
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any]=13 , __lowerCamelCase : str=7 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=True , __lowerCamelCase : Tuple=99 , __lowerCamelCase : int=32 , __lowerCamelCase : Any=2 , __lowerCamelCase : str=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=512 , __lowerCamelCase : Optional[int]=16 , __lowerCamelCase : int=2 , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]="None" , __lowerCamelCase : int=3 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : Optional[Any]=None , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = relative_attention
SCREAMING_SNAKE_CASE = position_biased_input
SCREAMING_SNAKE_CASE = pos_att_type
SCREAMING_SNAKE_CASE = scope
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = TFDebertaVaModel(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE = [input_ids, input_mask]
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = TFDebertaVaForMaskedLM(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFDebertaVaForSequenceClassification(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFDebertaVaForTokenClassification(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = TFDebertaVaForQuestionAnswering(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self : str ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = TFDebertaVaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _snake_case ( self : List[str] ):
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(__lowerCamelCase )
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="Model not available yet" )
def _snake_case ( self : Dict ):
pass
@slow
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
SCREAMING_SNAKE_CASE = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
SCREAMING_SNAKE_CASE = tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) | 698 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int):
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero." )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main() | 698 | 1 |
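# Worked check of the formula above: for 5x^2 + 6x + 1 the discriminant is
# 6 * 6 - 4 * 5 * 1 = 16, so the roots are (-6 + 4) / 10 = -0.2 and
# (-6 - 4) / 10 = -1.0, which is exactly what main() prints.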
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__A : str = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main() | 698 |
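# Hedged single-process sketch of the same algorithm, handy for sanity-checking
# the parallel version above: on alternating phases, adjacent pairs swap
# out-of-order elements; after len(arr) phases the list is sorted.
def odd_even_transposition_serial(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_serial(list(range(10, 0, -1))) == list(range(1, 11))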
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE = cs.out[:-1]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] )
SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase )
thread.start()
SCREAMING_SNAKE_CASE = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
SCREAMING_SNAKE_CASE = greedy_ids[:, input_ids.shape[1] :]
SCREAMING_SNAKE_CASE = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_prompt=__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE = cs.out[:-1]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : int ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("distilgpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = torch.ones((1, 5) , device=__lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=1 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
SCREAMING_SNAKE_CASE = cs.out[:-1] # Remove the final "\n"
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase , timeout=0.001 )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = ""
for new_text in streamer:
streamer_text += new_text | 698 | 1 |
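# Hedged sketch of the producer/consumer pattern these tests exercise
# (model/tokenizer/input_ids as in the tests; not an additional test case):
#
#   streamer = TextIteratorStreamer(tokenizer)
#   thread = Thread(target=model.generate, kwargs={"input_ids": input_ids,
#                                                  "max_new_tokens": 10,
#                                                  "streamer": streamer})
#   thread.start()
#   text = "".join(chunk for chunk in streamer)  # blocks until generation finishes
#   thread.join()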
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : List[Any]=13 , __lowerCamelCase : Dict=7 , __lowerCamelCase : Any=True , __lowerCamelCase : str=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[Any]=99 , __lowerCamelCase : Dict=16 , __lowerCamelCase : List[str]=36 , __lowerCamelCase : Any=6 , __lowerCamelCase : Tuple=6 , __lowerCamelCase : List[str]=6 , __lowerCamelCase : int=37 , __lowerCamelCase : int="gelu" , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=512 , __lowerCamelCase : Union[str, Any]=16 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Optional[Any]=4 , __lowerCamelCase : Any=None , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = embedding_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_hidden_groups
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : Optional[int] ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _snake_case ( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = AlbertModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self : str , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = AlbertForPreTraining(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , sentence_order_label=__lowerCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def _snake_case ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = AlbertForMaskedLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = AlbertForQuestionAnswering(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = AlbertForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : str , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = AlbertForTokenClassification(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = AlbertForMultipleChoice(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self : List[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
def _snake_case ( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any]=False ):
SCREAMING_SNAKE_CASE = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class in get_values(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase )
return inputs_dict
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = AlbertModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _snake_case ( self : Dict ):
self.config_tester.run_common_tests()
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
@slow
def _snake_case ( self : Dict ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = AlbertModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = AlbertModel.from_pretrained("albert-base-v2" )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __lowerCamelCase , atol=1e-4 ) ) | 698 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Union[str, Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 698 | 1 |
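# Hedged sketch of what the lazy pattern above buys at import time: importing
# the package is cheap, and heavy submodules load only on first attribute
# access (attribute names resolve through _LazyModule):
#
#   import transformers.models.fnet as fnet   # no torch/sentencepiece loaded yet
#   fnet.FNetConfig                            # triggers the real configuration import
#   # modeling classes are only registered when torch is available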
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)  # strict flag assumed; not recoverable here
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):  # function name reconstructed from context
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
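# Hedged usage sketch of the target/params convention consumed by
# instantiate_from_config above (class path and params are hypothetical):
#
#   config = OmegaConf.create(
#       {"target": "taming.models.vqgan.VQModel", "params": {"embed_dim": 256}}
#   )
#   model = instantiate_from_config(config)  # imports the class, then calls VQModel(embed_dim=256)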
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__A : Optional[Any] = datasets.load_iris()
__A : Optional[Any] = np.array(data['data'])
__A : Optional[int] = np.array(data['target'])
__A : Union[str, Any] = data['target_names']
__A , __A , __A , __A : Optional[int] = train_test_split(X, y)
def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 698 | 1 |
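# Worked check of the distance helper above: the Euclidean distance between
# [0, 0] and [3, 4] is sqrt(3**2 + 4**2) = 5.0.
assert euclidean_distance([0, 0], [3, 4]) == 5.0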
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
__A : Tuple = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 698 |
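# Hand-checked expectation for the adjacency list above: removing vertex 2, 3
# or 5 disconnects the graph, so compute_ap(data) should print 2, 3 and 5.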
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Dict = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "resnet"
lowerCamelCase__ = ["basic", "bottleneck"]
def __init__( self : Optional[Any] , __lowerCamelCase : int=3 , __lowerCamelCase : Dict=64 , __lowerCamelCase : str=[256, 512, 1024, 2048] , __lowerCamelCase : str=[3, 4, 6, 3] , __lowerCamelCase : Optional[int]="bottleneck" , __lowerCamelCase : int="relu" , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , **__lowerCamelCase : Dict , ):
super().__init__(**__lowerCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = embedding_size
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = layer_type
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = downsample_in_first_stage
SCREAMING_SNAKE_CASE = ["stem"] + [f"stage{idx}" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = version.parse("1.11" )
@property
def _snake_case ( self : Optional[int] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : Optional[int] ):
return 1e-3 | 698 | 1 |
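# Hedged sketch of what the ONNX config above expresses (exporter wiring is
# assumed from the transformers ONNX export API): axis 0 of "pixel_values"
# stays a symbolic "batch" dimension so the exported graph accepts any batch
# size, and validation compares PyTorch vs. ONNX Runtime outputs with
# atol = 1e-3, the tolerance returned above.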
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
__A : int = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "luke"
def __init__( self : Any , __lowerCamelCase : Union[str, Any]=50267 , __lowerCamelCase : List[Any]=500000 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : Optional[Any]=256 , __lowerCamelCase : Any=12 , __lowerCamelCase : Dict=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Union[str, Any]=512 , __lowerCamelCase : Any=2 , __lowerCamelCase : Union[str, Any]=0.02 , __lowerCamelCase : str=1e-12 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str=1 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : int=2 , **__lowerCamelCase : Tuple , ):
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = entity_vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = entity_emb_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = use_entity_aware_attention
SCREAMING_SNAKE_CASE = classifier_dropout | 698 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__A : str = logging.get_logger(__name__)
__A : Optional[Any] = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "imagegpt"
lowerCamelCase__ = ["past_key_values"]
lowerCamelCase__ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self : List[str] , vocab_size : int=512 + 1 , n_positions : int=32 * 32 , n_embd : int=512 , n_layer : int=24 , n_head : int=8 , n_inner : Any=None , activation_function : str="quick_gelu" , resid_pdrop : float=0.1 , embd_pdrop : float=0.1 , attn_pdrop : float=0.1 , layer_norm_epsilon : float=1e-5 , initializer_range : float=0.02 , scale_attn_weights : bool=True , use_cache : bool=True , tie_word_embeddings : bool=False , scale_attn_by_inverse_layer_idx : bool=False , reorder_and_upcast_attn : bool=False , **kwargs : Union[str, Any] , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@property
def _snake_case ( self : Optional[Any] ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def _snake_case ( self : Optional[int] , __lowerCamelCase : "FeatureExtractionMixin" , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 32 , ):
SCREAMING_SNAKE_CASE = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = dict(preprocessor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) )
return inputs | 698 | 1 |
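# Illustrative sketch (not from the original source): the generate_dummy_inputs override
# above feeds random images through the preprocessor. A standalone version of the same idea;
# the uint8 HWC layout and 255-scaling are assumptions, not the exact internals of
# _generate_dummy_images.
import numpy as np
from PIL import Image
def make_dummy_images(batch_size=1 , num_channels=3 , height=32 , width=32 ):
    # one random RGB image per batch element, in the format an image processor expects
    return [
        Image.fromarray((np.random.rand(height , width , num_channels ) * 255).astype("uint8" ))
        for _ in range(batch_size )
    ]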
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
__A : str = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "vit_msn"
    def __init__( self : Tuple , hidden_size : int=768 , num_hidden_layers : int=12 , num_attention_heads : int=12 , intermediate_size : int=3072 , hidden_act : str="gelu" , hidden_dropout_prob : float=0.0 , attention_probs_dropout_prob : float=0.0 , initializer_range : float=0.02 , layer_norm_eps : float=1e-06 , image_size : int=224 , patch_size : int=16 , num_channels : int=3 , qkv_bias : bool=True , **kwargs : Dict , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias | 698 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@slow
@require_torch
    def _snake_case ( self : Union[str, Any] ):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # replace pad token ids in the labels by -100 so they are ignored by the loss
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train() | 698 | 1 |
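# Illustrative sketch (not from the original source): the key trick in
# _map_to_encoder_decoder_inputs above is replacing pad token ids in the labels with -100,
# which cross-entropy losses in transformers ignore. Minimal standalone version
# (pad_token_id=0 is an assumed value for the demo):
pad_token_id = 0
labels_batch = [[5, 8, 2, 0, 0], [7, 2, 0, 0, 0]]
masked = [[-100 if token == pad_token_id else token for token in labels] for labels in labels_batch]
assert masked == [[5, 8, 2, -100, -100], [7, 2, -100, -100, -100]]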
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent( user_agent : Union[Dict, str, None] = None ):
    """Format a user-agent string with basic info about the running environment."""
    ua = F"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += F"; torch/{_torch_version}"
    if is_flax_available():
        ua += F"; jax/{_jax_version}"
        ua += F"; flax/{_flax_version}"
    if is_onnx_available():
        ua += F"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(F"{k}/{v}" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
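# Illustrative sketch (not from the original source): a sample result of http_user_agent;
# the version numbers in the comment are invented, and the ordering follows the += chain above.
example_ua = http_user_agent({"pipeline": "text2image"} )
# e.g. "diffusers/0.15.0; python/3.10.11; session_id/3f2a...; torch/2.0.0; pipeline/text2image"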
def get_full_repo_name( model_id : str , organization : Optional[str] = None , token : Optional[str] = None ):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )["name"]
        return F"{username}/{model_id}"
    else:
        return F"{organization}/{model_id}"
def create_model_card( args : Any , model_name : str ):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`." )
    if hasattr(args , "local_rank" ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , "hub_token" ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , "gradient_accumulation_steps" ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(args , "adam_beta1" ) else None , adam_beta2=args.adam_beta2 if hasattr(args , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(args , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(args , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(args , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(args , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(args , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , "README.md" )
    model_card.save(card_path )
def extract_commit_hash( resolved_file : Optional[str] , commit_hash : Optional[str] = None ):
    """Extract the commit hash from a resolved filename pointing into the Hub cache."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(R"snapshots/([^/]+)/" , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
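# Illustrative sketch (not from the original source): a typical cached path; the 40-character
# hex hash below is made up, but it is the shape REGEX_COMMIT_HASH accepts.
# extract_commit_hash("~/.cache/huggingface/diffusers/models--a--b/snapshots/"
#                     "0123456789abcdef0123456789abcdef01234567/unet/config.json")
# -> "0123456789abcdef0123456789abcdef01234567"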
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache( old_cache_dir : Optional[str] = None , new_cache_dir : Optional[str] = None ):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
__A : int = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
__A : Dict = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'the directory exists and can be written to.'
)
def _add_variant( weights_name : str , variant : Optional[str] = None ):
    """Insert a variant tag before the file extension, e.g. model.bin -> model.fp16.bin."""
    if variant is not None:
        splits = weights_name.split("." )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits )
    return weights_name
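# Illustration (directly checkable): the variant insertion is pure string logic.
assert _add_variant("diffusion_pytorch_model.bin" , "fp16" ) == "diffusion_pytorch_model.fp16.bin"
assert _add_variant("diffusion_pytorch_model.bin" ) == "diffusion_pytorch_model.bin"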
def _get_model_file( pretrained_model_name_or_path : str , *,
    weights_name : str , subfolder : Optional[str] , cache_dir : Optional[str] , force_download : bool , proxies : Optional[Dict] , resume_download : bool , local_files_only : bool , use_auth_token : Union[bool, str, None] , user_agent : Union[Dict, str, None] , revision : Optional[str] , commit_hash : Optional[str] = None , ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                F"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse("0.20.0" )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    F"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , FutureWarning , )
                return model_file
            except: # noqa: E722
                warnings.warn(
                    F"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}' so that the correct variant file can be added." , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
"this model name. Check the model page at "
F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
F" directory containing a file named {weights_name} or"
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
F"containing a file named {weights_name}" ) | 698 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
    def _snake_case ( self : Optional[Any] ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(config , "num_attention_heads" ) )
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__( self : str , parent : Optional[int] , batch_size : int=13 , image_size : int=64 , num_channels : int=3 , kernel_size : int=3 , stride : int=2 , padding : int=1 , patch_size : int=16 , hidden_sizes : Tuple=[128, 256, 384] , num_attention_heads : Any=[4, 6, 8] , depths : Tuple=[2, 3, 4] , key_dim : List[int]=[16, 16, 16] , drop_path_rate : int=0 , mlp_ratio : List[int]=[2, 2, 2] , attention_ratio : List[int]=[2, 2, 2] , initializer_range : float=0.02 , is_training : bool=True , use_labels : bool=True , num_labels : int=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def _snake_case ( self : int ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
def _snake_case ( self : List[str] ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def _snake_case ( self : Optional[int] , config : Any , pixel_values : Any , labels : Any ):
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    def _snake_case ( self : Tuple , config : Any , pixel_values : Any , labels : Any ):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _snake_case ( self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
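    # Worked arithmetic (illustration, not from the original source): with the tester defaults
    # above (image_size=64, kernel_size=3, stride=2, padding=1), the
    # floor((size + 2*padding - kernel_size)/stride) + 1 recurrence shrinks each spatial side
    # as 64 -> 32 -> 16 -> 8 -> 4 over four applications, e.g. (64 + 2 - 3) // 2 + 1 = 32;
    # i.e. a 16x reduction per side.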
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
    def _snake_case ( self : Dict ):
        self.model_tester = LevitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LevitConfig , has_text_modality=False , hidden_size=37 )
def _snake_case ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self : Any ):
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _snake_case ( self : Tuple ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def _snake_case ( self : Tuple ):
pass
    def _snake_case ( self : Dict ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def _snake_case ( self : List[Any] ):
        def check_hidden_states_output(inputs_dict : Any , config : Any , model_class : Any ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depths ) + 1
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height , width = image_size[0], image_size[1]
            for _ in range(4 ):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : int ):
pass
    def _snake_case ( self : List[Any] , inputs_dict : Tuple , model_class : Any , return_labels : bool=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def _snake_case ( self : Union[str, Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def _snake_case ( self : str ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _snake_case ( self : List[Any] ):
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def _snake_case ( self : Tuple ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def _snake_case ( self : List[str] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}" )
                    loss.backward()
@slow
def _snake_case ( self : List[Any] ):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : Dict ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def _snake_case ( self : Optional[int] ):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) ) | 698 | 1 |
from torch import nn
class _SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
    def __init__( self : Dict , class_size : List[str] , embed_size : Optional[Any] ):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size , class_size )
    def _snake_case ( self : Optional[int] , hidden_state : Dict ):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state )
        return logits | 698 |
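# Usage sketch (not from the original source): the classification head above is a single
# linear probe from hidden states to class logits; the sizes below are illustrative.
import torch
from torch import nn
head = nn.Linear(768 , 5 )            # embed_size=768, class_size=5 (assumed values)
hidden_state = torch.randn(2 , 768 )  # (batch, embed_size)
logits = head(hidden_state )          # -> (batch, class_size) == (2, 5)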
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def _snake_case ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
return None
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : int ):
return None
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _snake_case ( self : List[Any] ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , "tf" , 12 , **model_kwargs )
@require_torch
@slow
def _snake_case ( self : List[Any] ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , "pt" , 12 , **model_kwargs )
@require_torch
@slow
def _snake_case ( self : Optional[int] ):
from transformers import BertModel
SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(__lowerCamelCase ) )
vocab_file.flush()
SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(__lowerCamelCase ) ) )
model.save_pretrained(__lowerCamelCase )
self._test_export(__lowerCamelCase , "pt" , 12 , __lowerCamelCase )
@require_tf
@slow
def _snake_case ( self : Any ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , "tf" , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def _snake_case ( self : Any ):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , "pt" , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model" )
    def _snake_case ( self : Dict , model : Union[str, Any] , framework : str , opset : int , tokenizer : Tuple=None , **model_kwargs : List[str] ):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath("model.onnx" )
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework , model , path , opset , tokenizer , **model_kwargs )
                return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def _snake_case ( self : Dict ):
from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
        self._test_infer_dynamic_axis(model , tokenizer , "pt" )
@require_tf
@require_tokenizers
@slow
def _snake_case ( self : int ):
from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
        self._test_infer_dynamic_axis(model , tokenizer , "tf" )
    def _snake_case ( self : int , model : Dict , tokenizer : Union[str, Any] , framework : str ):
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
    def _snake_case ( self : Optional[Any] ):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens["input_ids"] )
        self.assertEqual(ordered_input_names[0] , "input_ids" )
def _snake_case ( self : Optional[int] ):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() ) | 698 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : Optional[Any] = '▁'
__A : Dict = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
__A : List[str] = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
__A : Optional[Any] = {
'facebook/s2t-small-librispeech-asr': 1_0_2_4,
}
__A : str = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
__A : Optional[Any] = {'mustc': MUSTC_LANGS}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = MAX_MODEL_INPUT_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
lowerCamelCase__ = []
    def __init__( self : Any , vocab_file : str , spm_file : str , bos_token : str="<s>" , eos_token : str="</s>" , pad_token : str="<pad>" , unk_token : str="<unk>" , do_upper_case : bool=False , do_lower_case : bool=False , tgt_lang : Any=None , lang_codes : Any=None , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs : Dict , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>" ) for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
@property
def _snake_case ( self : Union[str, Any] ):
return len(self.encoder )
@property
def _snake_case ( self : List[str] ):
return self._tgt_lang
@tgt_lang.setter
    def _snake_case ( self : Tuple , new_tgt_lang : Optional[int] ):
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )
    def _snake_case ( self : Any , tgt_lang : str ):
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _snake_case ( self : List[Any] , text : str ):
        return self.sp_model.encode(text , out_type=str )
    def _snake_case ( self : Dict , token : Tuple ):
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _snake_case ( self : Dict , index : int ):
        return self.decoder.get(index , self.unk_token )
    def _snake_case ( self : Dict , tokens : List[str] ):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def _snake_case ( self : List[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]]=None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def _snake_case ( self : List[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def _snake_case ( self : Tuple ):
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self : Union[str, Any] ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self : str , d : Dict ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def _snake_case ( self : Optional[Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ):
    with open(path , "r" ) as f:
        return json.load(f )
def save_json( data : Dict , path : str ):
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 ) | 698 |
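# Illustration (not from the original source): the two JSON helpers round-trip a small vocab;
# the temp path is created on the fly so the snippet stays self-contained.
import tempfile
vocab_demo = {"<unk>": 0, "hello": 1}
demo_path = os.path.join(tempfile.mkdtemp() , "vocab.json" )
save_json(vocab_demo , demo_path )
assert load_json(demo_path ) == vocab_demo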
from manim import *
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("CPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("GPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Model" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
rect.set_stroke(__lowerCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCamelCase , buff=0.0 )
self.add(__lowerCamelCase )
cpu_targs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , aligned_edge=__lowerCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
SCREAMING_SNAKE_CASE = MarkupText(
f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCamelCase ) , Write(__lowerCamelCase ) )
self.play(Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 )
target.move_to(__lowerCamelCase )
first_animations.append(GrowFromCenter(__lowerCamelCase , run_time=1 ) )
SCREAMING_SNAKE_CASE = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) )
self.play(*__lowerCamelCase )
self.play(*__lowerCamelCase )
self.wait() | 698 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__( self : Optional[int] , parent : Any , batch_size : int=13 , seq_length : int=7 , is_training : bool=True , use_input_mask : bool=True , use_token_type_ids : bool=True , use_labels : bool=True , vocab_size : int=99 , hidden_size : int=32 , num_hidden_layers : int=2 , num_attention_heads : int=4 , intermediate_size : int=37 , hidden_act : str="gelu" , hidden_dropout_prob : float=0.1 , attention_probs_dropout_prob : float=0.1 , max_position_embeddings : int=512 , type_vocab_size : int=16 , type_sequence_label_size : int=2 , initializer_range : float=0.02 , num_labels : int=3 , num_choices : int=4 , scope : Any=None , projection_dim : int=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def _snake_case ( self : Tuple ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _snake_case ( self : Dict , config : Any , input_ids : Tuple , token_type_ids : int , input_mask : Any , sequence_labels : Union[str, Any] , token_labels : Optional[int] , choice_labels : List[str] ):
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def _snake_case ( self : Any , config : List[str] , input_ids : Any , token_type_ids : List[Any] , input_mask : Optional[int] , sequence_labels : List[str] , token_labels : List[Any] , choice_labels : Optional[int] ):
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def _snake_case ( self : Dict , config : Dict , input_ids : Union[str, Any] , token_type_ids : Dict , input_mask : Any , sequence_labels : Union[str, Any] , token_labels : str , choice_labels : List[str] ):
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
@slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) ) | 698 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    '''simple docstring'''
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
@slow
@require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
@slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 | 1 |
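    # Illustrative sanity checks (values follow the conversion table above):
    print(energy_conversion("joule", "kilojoule", 1))  # 0.001
    print(energy_conversion("kilowatthour", "joule", 1))  # 3600000.0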
def base16_encode(data: bytes) -> str:
    # Format each byte of the input as two uppercase hexadecimal digits.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 |
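    # Illustrative round trip using the pair of functions above:
    print(base16_encode(b"Hello"))  # 48656C6C6F
    print(base16_decode("48656C6C6F"))  # b'Hello'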
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    '''simple docstring'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
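# Minimal illustrative usage (a sketch - it assumes the parent HashTable from the
# accompanying hash_table module is constructed with a table size and exposes insert_data):
#
#   table = HashTableWithLinkedList(3)
#   for value in (10, 20, 30):
#       table.insert_data(value)  # colliding keys accumulate in a deque per slot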
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
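# For intuition: euclidean_distance([0, 0], [3, 4]) evaluates to 5.0 (the 3-4-5 triangle),
# and classifier simply takes a majority vote among the k training points nearest by that metric.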
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 698 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : int = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    '''simple docstring'''
lowerCamelCase__ = "gpt_neo"
lowerCamelCase__ = ["past_key_values"]
lowerCamelCase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
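    # For example, the default attention_types [[["global", "local"], 12]] expands to the
    # alternating list ["global", "local", "global", "local", ...] with 24 entries
    # (12 repetitions of the pair), one entry per hidden layer.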
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of the block-size computation to enable ONNX export."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
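# For intuition (assumes PyTorch is installed): with seq_length=512 and window_size=256,
# the largest divisor of 512 strictly below 256 is 128, so the call returns (128, 4) -
# a block length of 128 and 4 blocks covering the sequence.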
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
    @property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self):
        return self._config.num_heads
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid: np.ndarray, source: tuple[int, int], destination: tuple[int, int], allow_diagonal: bool):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 |
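    # Illustrative run (assumes the dijkstra name used above): on a 3x3 grid of passable
    # cells (1 = free), the shortest 4-connected path from (0, 0) to (2, 2) has cost 4.0.
    demo_grid = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    print(dijkstra(demo_grid, (0, 0), (2, 2), allow_diagonal=False))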
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a sequence of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
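# For example, get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.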
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 | 1 |
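    # Illustrative check: 100 V at 0 degrees with 5 A at 0 degrees gives S = (500+0j) volt-amperes.
    print(apparent_power(100, 5, 0, 0))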
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    # Formula for the sum of an arithmetic series.
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main():
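    # For first_term=1, common_diff=1, num_of_terms=10 this is 1 + 2 + ... + 10 = 55.0.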
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
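# Naming scheme produced above: with two expert shards plus the shared block, the files are
# pytorch_model-00001-of-00003.bin through pytorch_model-00003-of-00003.bin, accompanied by
# a pytorch_model.bin.index.json file mapping each parameter name to its shard.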
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    '''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : Optional[int] = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class NllbMoeConfig(PretrainedConfig):
    '''simple docstring'''
lowerCamelCase__ = "nllb-moe"
lowerCamelCase__ = ["past_key_values"]
lowerCamelCase__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        '''simple docstring'''

        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__A : Tuple = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , max_seq_len=50 , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _snake_case ( self : List[Any] ):
pass | 698 | 1 |
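# --- Illustration (added, not part of the original tests) ---
# A hedged sketch of the pipeline exercised above, using one of the same
# checkpoints; network access and the INVOICE_URL fixture are assumed.
#
#   from transformers import pipeline
#   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#   print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2))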
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : int = '▁'
__A : Any = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__A : List[str] = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__A : Optional[int] = {'vinai/bartpho-syllable': 1_0_2_4}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]="<s>" , __lowerCamelCase : str="</s>" , __lowerCamelCase : Tuple="</s>" , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : Optional[int]="<unk>" , __lowerCamelCase : Dict="<pad>" , __lowerCamelCase : Dict="<mask>" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : List[Any] , ):
# Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = monolingual_vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE = cnt
cnt += 1
with open(__lowerCamelCase , "r" , encoding="utf-8" ) as f:
for line in f.readlines():
SCREAMING_SNAKE_CASE = line.strip().split()[0]
SCREAMING_SNAKE_CASE = len(self.fairseq_tokens_to_ids )
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE = len(self.fairseq_tokens_to_ids )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Tuple ):
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self : int ):
return len(self.fairseq_ids_to_tokens )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self : Tuple , __lowerCamelCase : str ):
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def _snake_case ( self : int , __lowerCamelCase : int ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _snake_case ( self : str , __lowerCamelCase : str ):
return self.fairseq_ids_to_tokens[index]
def _snake_case ( self : str , __lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase ).replace(__lowerCamelCase , " " ).strip()
return out_string
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"{str(__lowerCamelCase )} \n" )
return out_vocab_file, out_monolingual_vocab_file | 698 |
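# --- Usage sketch (added, not part of the original module) ---
# The class above mirrors transformers' BartphoTokenizer; a hedged example of
# the intended usage, assuming access to the vinai/bartpho-syllable checkpoint:
#
#   from transformers import BartphoTokenizer
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))  # tokens outside dict.txt map to <unk>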
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "facebook/bart-large-mnli"
lowerCamelCase__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
lowerCamelCase__ = "text_classifier"
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSequenceClassification
lowerCamelCase__ = ["text", ["text"]]
lowerCamelCase__ = ["text"]
def _snake_case ( self : Optional[Any] ):
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
SCREAMING_SNAKE_CASE = int(__lowerCamelCase )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def _snake_case ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(__lowerCamelCase ) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )
def _snake_case ( self : str , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id] | 698 | 1 |
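# --- Usage sketch (added, not part of the original module) ---
# Hedged example of exercising the tool above through the base PipelineTool
# __call__, which chains encode -> forward -> decode; downloading
# facebook/bart-large-mnli is assumed to succeed.
#
#   tool = _SCREAMING_SNAKE_CASE()
#   print(tool("This new restaurant is amazing!", ["positive", "negative"]))
#   # -> "positive" (the label with the highest entailment score)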
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[
[
8.2_220_991, # 3rd highest value; idx. 0
-0.5_620_044,
5.23_229_752,
4.0_386_393,
-6.8_798_378,
-0.54_785_802,
-3.2_012_153,
2.92_777_176,
1.88_171_953,
7.35_341_276, # 5th highest value; idx. 9
8.43_207_833, # 2nd highest value; idx. 10
-9.85_711_836,
-5.96_209_236,
-1.13_039_161,
-7.1_115_294,
-0.8_369_633,
-5.3_186_408,
7.06_427_407,
0.81_369_344,
-0.82_023_817,
-5.9_179_796,
0.58_813_443,
-6.99_778_438,
4.71_551_189,
-0.18_771_637,
7.44_020_759, # 4th highest value; idx. 25
9.38_450_987, # 1st highest value; idx. 26
2.12_662_941,
-9.32_562_038,
2.35_652_522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58_425_518,
4.53_139_238,
-5.57_510_464,
-6.28_030_699,
-7.19_529_503,
-4.02_122_551,
1.39_337_037,
-6.06_707_057,
1.59_480_517,
-9.643_119,
0.03_907_799,
0.67_231_762,
-8.88_206_726,
6.27_115_922, # 4th highest value; idx. 13
2.28_520_723,
4.82_767_506,
4.30_421_368,
8.8_275_313, # 2nd highest value; idx. 17
5.44_029_958, # 5th highest value; idx. 18
-4.4_735_794,
7.38_579_536, # 3rd highest value; idx. 20
-2.91_051_663,
2.61_946_077,
-2.5_674_762,
-9.48_959_302,
-4.02_922_645,
-1.35_416_918,
9.67_702_323, # 1st highest value; idx. 27
-5.89_478_553,
1.85_370_467,
], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above
SCREAMING_SNAKE_CASE = tf_top_k_top_p_filtering(__lowerCamelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
SCREAMING_SNAKE_CASE = output[output != -float("inf" )]
SCREAMING_SNAKE_CASE = tf.cast(
tf.where(tf.not_equal(__lowerCamelCase , tf.constant(-float("inf" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1e-12 )
tf.debugging.assert_equal(__lowerCamelCase , __lowerCamelCase )
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase , __snake_case ):
'''simple docstring'''
if is_tf_available():
lowerCamelCase__ = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def _snake_case ( self : Any ):
# TF-only test: tf.saved_model export
SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 2
class _SCREAMING_SNAKE_CASE ( tf.Module ):
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : List[str] ):
super(__lowerCamelCase , self ).__init__()
SCREAMING_SNAKE_CASE = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="input_ids" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="attention_mask" ),
) , jit_compile=__lowerCamelCase , )
def _snake_case ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model.generate(
input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , max_new_tokens=__lowerCamelCase , return_dict_in_generate=__lowerCamelCase , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE = [[2, 0], [102, 103]]
SCREAMING_SNAKE_CASE = [[1, 0], [1, 1]]
SCREAMING_SNAKE_CASE = DummyModel(model=__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__lowerCamelCase , __lowerCamelCase , signatures={"serving_default": dummy_model.serving} )
SCREAMING_SNAKE_CASE = tf.saved_model.load(__lowerCamelCase ).signatures["serving_default"]
for batch_size in range(1 , len(__lowerCamelCase ) + 1 ):
SCREAMING_SNAKE_CASE = {
"input_ids": tf.constant(dummy_input_ids[:batch_size] ),
"attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
}
SCREAMING_SNAKE_CASE = serving_func(**__lowerCamelCase )["sequences"]
SCREAMING_SNAKE_CASE = test_model.generate(**__lowerCamelCase , max_new_tokens=__lowerCamelCase )
tf.debugging.assert_equal(__lowerCamelCase , __lowerCamelCase )
@slow
def _snake_case ( self : List[str] ):
# TF-only test: tf.saved_model export
SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 2
class _SCREAMING_SNAKE_CASE ( tf.Module ):
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : Tuple ):
super(__lowerCamelCase , self ).__init__()
SCREAMING_SNAKE_CASE = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="input_ids" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="attention_mask" ),
) , jit_compile=__lowerCamelCase , )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = self.model.generate(
input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase , max_new_tokens=__lowerCamelCase , return_dict_in_generate=__lowerCamelCase , )
return {"sequences": outputs["sequences"]}
SCREAMING_SNAKE_CASE = [[2], [102, 103]]
SCREAMING_SNAKE_CASE = [[1], [1, 1]]
SCREAMING_SNAKE_CASE = DummyModel(model=__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__lowerCamelCase , __lowerCamelCase , signatures={"serving_default": dummy_model.serving} )
SCREAMING_SNAKE_CASE = tf.saved_model.load(__lowerCamelCase ).signatures["serving_default"]
for input_row in range(len(__lowerCamelCase ) ):
SCREAMING_SNAKE_CASE = {
"input_ids": tf.constant([dummy_input_ids[input_row]] ),
"attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
}
SCREAMING_SNAKE_CASE = serving_func(**__lowerCamelCase )["sequences"]
SCREAMING_SNAKE_CASE = test_model.generate(**__lowerCamelCase , max_new_tokens=__lowerCamelCase )
tf.debugging.assert_equal(__lowerCamelCase , __lowerCamelCase )
@slow
@require_tensorflow_text
def _snake_case ( self : int ):
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : int ):
super().__init__()
SCREAMING_SNAKE_CASE = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__lowerCamelCase , "spiece.model" ) , "rb" ).read() )
SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def _snake_case ( self : int , __lowerCamelCase : Tuple , *__lowerCamelCase : int , **__lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.tokenizer.tokenize(__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = text.pad_model_inputs(
__lowerCamelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
SCREAMING_SNAKE_CASE = self.model.generate(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase )
return self.tokenizer.detokenize(__lowerCamelCase )
SCREAMING_SNAKE_CASE = CompleteSentenceTransformer()
SCREAMING_SNAKE_CASE = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
SCREAMING_SNAKE_CASE = complete_model(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tf.keras.Model(__lowerCamelCase , __lowerCamelCase )
keras_model.save(__lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
# Has PT equivalent: this test relies on random sampling
SCREAMING_SNAKE_CASE = {
"do_sample": True,
"num_beams": 1,
"top_p": 0.7,
"top_k": 10,
"temperature": 0.7,
}
SCREAMING_SNAKE_CASE = 14
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = "Hello, my dog is cute and"
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_tensors="tf" )
SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE = model.generate(**__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
SCREAMING_SNAKE_CASE = [638, 198]
with tf.device(":/CPU:0" ):
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE = model.generate(**__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def _snake_case ( self : int ):
# Has PT equivalent: ample use of framework-specific code
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE = "Hugging Face is a technology company based in New York and Paris."
SCREAMING_SNAKE_CASE = bart_tokenizer(__lowerCamelCase , return_tensors="tf" ).input_ids
SCREAMING_SNAKE_CASE = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE = bart_model.generate(__lowerCamelCase ).numpy()
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def _snake_case ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any]=None , **__lowerCamelCase : int ):
return super().call(__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
SCREAMING_SNAKE_CASE = bart_model.generate(__lowerCamelCase , foo="bar" ).numpy()
self.assertTrue(np.array_equal(__lowerCamelCase , __lowerCamelCase ) )
class _SCREAMING_SNAKE_CASE ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def _snake_case ( self : List[str] , __lowerCamelCase : Tuple , **__lowerCamelCase : Union[str, Any] ):
return super().call(__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = FakeEncoder(bart_model.config , bart_model.model.shared )
SCREAMING_SNAKE_CASE = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
SCREAMING_SNAKE_CASE = bart_model.generate(__lowerCamelCase ).numpy()
with self.assertRaises(__lowerCamelCase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__lowerCamelCase , foo="bar" ) | 698 |
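# --- Illustration (added, not part of the original tests) ---
# Hedged sketch of the sampling knobs one of the tests above exercises, against
# the same tiny checkpoint; network access is assumed:
#
#   model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#   tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#   inputs = tokenizer("Hello, my dog is cute and", return_tensors="tf")
#   out = model.generate(**inputs, do_sample=True, top_k=10, top_p=0.7, temperature=0.7)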
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __a ( A__ : str=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE = subparsers.add_parser("test" )
else:
SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=A__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
def __a ( A__ : Tuple ):
SCREAMING_SNAKE_CASE = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
SCREAMING_SNAKE_CASE = script_name
else:
SCREAMING_SNAKE_CASE = F"--config_file={args.config_file} {script_name}"
SCREAMING_SNAKE_CASE = ["accelerate-launch"] + test_args.split()
SCREAMING_SNAKE_CASE = execute_subprocess_async(A__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __a ( ):
SCREAMING_SNAKE_CASE = test_command_parser()
SCREAMING_SNAKE_CASE = parser.parse_args()
test_command(A__ )
if __name__ == "__main__":
main() | 698 | 1 |
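# --- Usage sketch (added, not part of the original module) ---
# This module backs the `accelerate test` CLI command. A hedged shell example,
# with a placeholder config path:
#
#   accelerate test --config_file path/to/default_config.yaml
#
# which runs test_script.py through `accelerate-launch` and prints a success
# message when the subprocess exits cleanly.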
import qiskit
def __a ( A__ : int , A__ : int ):
SCREAMING_SNAKE_CASE = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE = qiskit.QuantumCircuit(A__ , A__ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
SCREAMING_SNAKE_CASE = qiskit.execute(A__ , A__ , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(A__ )
if __name__ == "__main__":
__A : Union[str, Any] = single_qubit_measure(2, 2)
print(f'Total count for various states are: {counts}') | 698 |
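# Illustration (added): with X applied to both qubits, every one of the 1000
# shots collapses to "11", so the upstream version of this script prints:
# Total count for various states are: {'11': 1000}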
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Optional[int] = logging.get_logger(__name__)
__A : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A : Tuple = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__A : Dict = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __a ( ):
SCREAMING_SNAKE_CASE = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE = bs[:]
SCREAMING_SNAKE_CASE = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
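# Illustration (added): upstream this helper is `bytes_to_unicode`. It assigns
# every byte value a printable single-character stand-in so BPE never merges
# across raw control bytes or spaces: printable bytes map to themselves
# (ord("A") -> "A") while unprintable ones are shifted past 0xFF (byte 0 -> "Ā").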
def __a ( A__ : Optional[int] ):
SCREAMING_SNAKE_CASE = set()
SCREAMING_SNAKE_CASE = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE = char
return pairs
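# Illustration (added): upstream this helper is `get_pairs`. For the symbol
# tuple ("h", "e", "l", "l", "o") it returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} -- the candidate BPE merges.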
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]="replace" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Tuple , ):
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE = bytes_to_unicode()
SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _snake_case ( self : Dict ):
return len(self.encoder )
def _snake_case ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = word
return word
def _snake_case ( self : str , __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : List[str] , __lowerCamelCase : str ):
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple ):
return self.decoder.get(__lowerCamelCase )
def _snake_case ( self : Tuple , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
SCREAMING_SNAKE_CASE = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=False , **__lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE = " " + text
return (text, kwargs)
def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def _snake_case ( self : Tuple , __lowerCamelCase : "Conversation" ):
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within blenderbot
inputs.append(" " + text )
else:
# Generated responses already contain the space prefix.
inputs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 698 | 1 |
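# --- Usage sketch (added, not part of the original module) ---
# The class above mirrors transformers' BlenderbotTokenizer; a hedged example,
# assuming access to the facebook/blenderbot-3B checkpoint:
#
#   from transformers import BlenderbotTokenizer
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#   print(tokenizer(" Hello world")["input_ids"])  # ends with the </s> (eos) id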
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
__A : Optional[int] = logging.getLogger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowerCamelCase__ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Whether tp freeze the encoder."} )
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowerCamelCase__ = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
lowerCamelCase__ = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
lowerCamelCase__ = field(
default=1_0_2_4 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase__ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase__ = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
lowerCamelCase__ = field(
default=1_4_2 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase__ = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
lowerCamelCase__ = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
lowerCamelCase__ = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Source language id for translation."} )
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "Target language id for translation."} )
lowerCamelCase__ = field(default=__snake_case , metadata={"help": "# num_beams to use for evaluation."} )
lowerCamelCase__ = field(
default=__snake_case , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def __a ( A__ : Tuple , A__ : Tuple , A__ : List[Any] ):
logger.info(F"***** {split} metrics *****" )
for key in sorted(metrics.keys() ):
logger.info(F" {key} = {metrics[key]}" )
save_json(A__ , os.path.join(A__ , F"{split}_results.json" ) )
def __a ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
check_output_dir(A__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , A__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(A__ , A__ , A__ ):
assert hasattr(A__ , A__ ), F"({config.__class__.__name__}) doesn't have a `{p}` attribute"
setattr(A__ , A__ , getattr(A__ , A__ ) )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=A__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(A__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
SCREAMING_SNAKE_CASE = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(A__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(A__ , A__ ):
SCREAMING_SNAKE_CASE = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(A__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
SCREAMING_SNAKE_CASE = SeqaSeqDataset
# Get datasets
SCREAMING_SNAKE_CASE = (
dataset_class(
A__ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE = (
dataset_class(
A__ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
SCREAMING_SNAKE_CASE = (
dataset_class(
A__ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
SCREAMING_SNAKE_CASE = (
build_compute_metrics_fn(data_args.task , A__ ) if training_args.predict_with_generate else None
)
SCREAMING_SNAKE_CASE = SeqaSeqTrainer(
model=A__ , args=A__ , data_args=A__ , train_dataset=A__ , eval_dataset=A__ , data_collator=SeqaSeqDataCollator(
A__ , A__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=A__ , tokenizer=A__ , )
SCREAMING_SNAKE_CASE = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
SCREAMING_SNAKE_CASE = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
SCREAMING_SNAKE_CASE = train_result.metrics
SCREAMING_SNAKE_CASE = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , A__ , training_args.output_dir )
all_metrics.update(A__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
SCREAMING_SNAKE_CASE = trainer.evaluate(metric_key_prefix="val" )
SCREAMING_SNAKE_CASE = data_args.n_val
SCREAMING_SNAKE_CASE = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , A__ , training_args.output_dir )
all_metrics.update(A__ )
if training_args.do_predict:
logger.info("*** Predict ***" )
SCREAMING_SNAKE_CASE = trainer.predict(test_dataset=A__ , metric_key_prefix="test" )
SCREAMING_SNAKE_CASE = test_output.metrics
SCREAMING_SNAKE_CASE = data_args.n_test
if trainer.is_world_process_zero():
SCREAMING_SNAKE_CASE = round(metrics["test_loss"] , 4 )
handle_metrics("test" , A__ , training_args.output_dir )
all_metrics.update(A__ )
if training_args.predict_with_generate:
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
SCREAMING_SNAKE_CASE = lmap(str.strip , A__ )
write_txt_file(A__ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(A__ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def __a ( A__ : List[str] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 698 |
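# --- Usage sketch (added, not part of the original script) ---
# Hedged shell invocation of the fine-tuning script above; the data directory
# layout (train/val/test .source and .target files) follows the legacy seq2seq
# examples convention, and all paths are placeholders:
#
#   python finetune_trainer.py \
#       --model_name_or_path facebook/bart-base \
#       --data_dir path/to/data --output_dir path/to/output \
#       --do_train --do_eval --predict_with_generate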
from __future__ import annotations
from cmath import sqrt
def __a ( A__ : int , A__ : int , A__ : int ):
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
SCREAMING_SNAKE_CASE = b * b - 4 * a * c
SCREAMING_SNAKE_CASE = (-b + sqrt(A__ )) / (2 * a)
SCREAMING_SNAKE_CASE = (-b - sqrt(A__ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def __a ( ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = quadratic_roots(a=5 , b=6 , c=1 )
print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
main() | 698 | 1 |
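# Illustration (added): for 5x^2 + 6x + 1 = 0 the discriminant is 36 - 20 = 16,
# so the roots are (-6 ± 4) / 10, i.e. -0.2 and -1.0, and main() is expected to
# print: The solutions are: -0.2 and -1.0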
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__A : Dict = logging.get_logger(__name__)
__A : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__A : int = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__A : Any = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__A : int = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__A : Dict = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
__A : Union[str, Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
__A : Optional[int] = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
__A : Union[str, Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__A : Tuple = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__A : str = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = DPRContextEncoderTokenizer
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = DPRQuestionEncoderTokenizer
__A : str = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__A : List[Any] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__A : Optional[int] = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(__snake_case )
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __call__( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Union[bool, str] = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : Optional[Any] , ):
if titles is None and texts is None:
return super().__call__(
__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE = titles if texts is None else texts
return super().__call__(
__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase , return_attention_mask=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE = titles if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [titles]
SCREAMING_SNAKE_CASE = texts if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [texts]
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE = questions if not isinstance(__lowerCamelCase , __lowerCamelCase ) else [questions] * n_passages
assert len(__lowerCamelCase ) == len(
__lowerCamelCase ), f"There should be as many titles than texts but got {len(__lowerCamelCase )} titles and {len(__lowerCamelCase )} texts."
SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , __lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"]
SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase )["input_ids"]
SCREAMING_SNAKE_CASE = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__lowerCamelCase , __lowerCamelCase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE = attention_mask
return self.pad(__lowerCamelCase , padding=__lowerCamelCase , max_length=__lowerCamelCase , return_tensors=__lowerCamelCase )
def _snake_case ( self : Any , __lowerCamelCase : BatchEncoding , __lowerCamelCase : DPRReaderOutput , __lowerCamelCase : int = 16 , __lowerCamelCase : int = 64 , __lowerCamelCase : int = 4 , ):
SCREAMING_SNAKE_CASE = reader_input["input_ids"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = reader_output[:3]
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE = sorted(range(__lowerCamelCase ) , reverse=__lowerCamelCase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__lowerCamelCase , top_spans=__lowerCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__lowerCamelCase , start_index=__lowerCamelCase , end_index=__lowerCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__lowerCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : List[int] , __lowerCamelCase : int , __lowerCamelCase : int , ):
SCREAMING_SNAKE_CASE = []
for start_index, start_score in enumerate(__lowerCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE = sorted(__lowerCamelCase , key=lambda __lowerCamelCase : x[1] , reverse=__lowerCamelCase )
SCREAMING_SNAKE_CASE = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
SCREAMING_SNAKE_CASE = end_index - start_index + 1
assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__lowerCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__snake_case )
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ = ["input_ids", "attention_mask"]
lowerCamelCase__ = DPRReaderTokenizer | 698 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE = cs.out[:-1]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] )
SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase )
thread.start()
SCREAMING_SNAKE_CASE = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
SCREAMING_SNAKE_CASE = greedy_ids[:, input_ids.shape[1] :]
SCREAMING_SNAKE_CASE = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_prompt=__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE = cs.out[:-1]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : int ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("distilgpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = torch.ones((1, 5) , device=__lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=1 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
SCREAMING_SNAKE_CASE = cs.out[:-1] # Remove the final "\n"
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase , timeout=0.001 )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = ""
for new_text in streamer:
streamer_text += new_text | 698 | 1 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
__A : List[Any] = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def __a ( A__ : Any , A__ : Union[str, Any] ):
warnings.warn(A__ , A__ )
requires_backends(A__ , "sklearn" )
return (preds == labels).mean()
def __a ( A__ : List[str] , A__ : Tuple ):
warnings.warn(A__ , A__ )
requires_backends(A__ , "sklearn" )
SCREAMING_SNAKE_CASE = simple_accuracy(A__ , A__ )
SCREAMING_SNAKE_CASE = fa_score(y_true=A__ , y_pred=A__ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def __a ( A__ : List[Any] , A__ : List[str] ):
warnings.warn(A__ , A__ )
requires_backends(A__ , "sklearn" )
SCREAMING_SNAKE_CASE = pearsonr(A__ , A__ )[0]
SCREAMING_SNAKE_CASE = spearmanr(A__ , A__ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __a ( A__ : Union[str, Any] , A__ : List[str] , A__ : Optional[Any] ):
warnings.warn(A__ , A__ )
requires_backends(A__ , "sklearn" )
assert len(A__ ) == len(A__ ), F"Predictions and labels have mismatched lengths {len(A__ )} and {len(A__ )}"
if task_name == "cola":
return {"mcc": matthews_corrcoef(A__ , A__ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(A__ , A__ )}
elif task_name == "mrpc":
return acc_and_fa(A__ , A__ )
elif task_name == "sts-b":
return pearson_and_spearman(A__ , A__ )
elif task_name == "qqp":
return acc_and_fa(A__ , A__ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(A__ , A__ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(A__ , A__ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(A__ , A__ )}
elif task_name == "rte":
return {"acc": simple_accuracy(A__ , A__ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(A__ , A__ )}
elif task_name == "hans":
return {"acc": simple_accuracy(A__ , A__ )}
else:
raise KeyError(A__ )
def __a ( A__ : Tuple , A__ : Union[str, Any] , A__ : Dict ):
warnings.warn(A__ , A__ )
requires_backends(A__ , "sklearn" )
if len(A__ ) != len(A__ ):
raise ValueError(F"Predictions and labels have mismatched lengths {len(A__ )} and {len(A__ )}" )
if task_name == "xnli":
return {"acc": simple_accuracy(A__ , A__ )}
else:
raise KeyError(A__ ) | 698 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Union[str, Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 698 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[int] = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
__A : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __a ( A__ : str ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
SCREAMING_SNAKE_CASE = model_type_to_module_name(A__ )
SCREAMING_SNAKE_CASE = importlib.import_module(F".{module_name}" , "transformers.models" )
try:
return getattr(A__ , A__ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(A__ , "__name__" , A__ ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
SCREAMING_SNAKE_CASE = importlib.import_module("transformers" )
if hasattr(A__ , A__ ):
return getattr(A__ , A__ )
return None
def __a ( A__ : Union[str, os.PathLike] , A__ : Optional[Union[str, os.PathLike]] = None , A__ : bool = False , A__ : bool = False , A__ : Optional[Dict[str, str]] = None , A__ : Optional[Union[bool, str]] = None , A__ : Optional[str] = None , A__ : bool = False , **A__ : str , ):
SCREAMING_SNAKE_CASE = get_file_from_repo(
A__ , A__ , cache_dir=A__ , force_download=A__ , resume_download=A__ , proxies=A__ , use_auth_token=A__ , revision=A__ , local_files_only=A__ , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(A__ , encoding="utf-8" ) as reader:
return json.load(A__ )
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(__lowerCamelCase )
def _snake_case ( cls : Any , __lowerCamelCase : Tuple , **__lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = kwargs.pop("config" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = kwargs.pop("trust_remote_code" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ImageProcessingMixin.get_image_processor_dict(__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = config_dict.get("image_processor_type" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
SCREAMING_SNAKE_CASE = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
SCREAMING_SNAKE_CASE = config_dict.pop("feature_extractor_type" , __lowerCamelCase )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
SCREAMING_SNAKE_CASE = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
SCREAMING_SNAKE_CASE = config_dict["auto_map"]["AutoFeatureExtractor"]
SCREAMING_SNAKE_CASE = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
# It could be in `config.image_processor_type``
SCREAMING_SNAKE_CASE = getattr(__lowerCamelCase , "image_processor_type" , __lowerCamelCase )
if hasattr(__lowerCamelCase , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
SCREAMING_SNAKE_CASE = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
SCREAMING_SNAKE_CASE = image_processor_class_from_name(__lowerCamelCase )
SCREAMING_SNAKE_CASE = image_processor_auto_map is not None
SCREAMING_SNAKE_CASE = image_processor_class is not None or type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING
SCREAMING_SNAKE_CASE = resolve_trust_remote_code(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if has_remote_code and trust_remote_code:
SCREAMING_SNAKE_CASE = get_class_from_dynamic_module(
__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = kwargs.pop("code_revision" , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING:
SCREAMING_SNAKE_CASE = IMAGE_PROCESSOR_MAPPING[type(__lowerCamelCase )]
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
raise ValueError(
f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def _snake_case ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ):
IMAGE_PROCESSOR_MAPPING.register(__lowerCamelCase , __lowerCamelCase ) | 698 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__A : Optional[Any] = datasets.load_iris()
__A : Optional[Any] = np.array(data['data'])
__A : Optional[int] = np.array(data['target'])
__A : Union[str, Any] = data['target_names']
__A , __A , __A , __A : Optional[int] = train_test_split(X, y)
def __a ( A__ : Optional[int] , A__ : Dict ):
return np.linalg.norm(np.array(A__ ) - np.array(A__ ) )
def __a ( A__ : Optional[Any] , A__ : int , A__ : Dict , A__ : Optional[Any] , A__ : Dict=5 ):
SCREAMING_SNAKE_CASE = zip(A__ , A__ )
# List of distances of all points from the point to be classified
SCREAMING_SNAKE_CASE = []
for data_point in data:
SCREAMING_SNAKE_CASE = euclidean_distance(data_point[0] , A__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
SCREAMING_SNAKE_CASE = [i[1] for i in sorted(A__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
SCREAMING_SNAKE_CASE = Counter(A__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 698 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = RobertaTokenizer
lowerCamelCase__ = RobertaTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = {"cls_token": "<s>"}
def _snake_case ( self : int ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
SCREAMING_SNAKE_CASE = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def _snake_case ( self : Optional[Any] , **__lowerCamelCase : Any ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _snake_case ( self : Dict , **__lowerCamelCase : int ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = "lower newer"
return input_text, output_text
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
SCREAMING_SNAKE_CASE = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("roberta-base" )
SCREAMING_SNAKE_CASE = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = "Encode this sequence."
SCREAMING_SNAKE_CASE = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
SCREAMING_SNAKE_CASE = "Encode <mask> sequence"
SCREAMING_SNAKE_CASE = "Encode <mask>sequence"
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase )
SCREAMING_SNAKE_CASE = encoded.index(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase )
SCREAMING_SNAKE_CASE = encoded.index(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Dict ):
pass
def _snake_case ( self : Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = "A, <mask> AllenNLP sentence."
SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def _snake_case ( self : Tuple ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
SCREAMING_SNAKE_CASE = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def _snake_case ( self : Tuple ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE = f"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
SCREAMING_SNAKE_CASE = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , ) | 698 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Dict = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "resnet"
lowerCamelCase__ = ["basic", "bottleneck"]
def __init__( self : Optional[Any] , __lowerCamelCase : int=3 , __lowerCamelCase : Dict=64 , __lowerCamelCase : str=[256, 512, 1024, 2048] , __lowerCamelCase : str=[3, 4, 6, 3] , __lowerCamelCase : Optional[int]="bottleneck" , __lowerCamelCase : int="relu" , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , **__lowerCamelCase : Dict , ):
super().__init__(**__lowerCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = embedding_size
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = layer_type
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = downsample_in_first_stage
SCREAMING_SNAKE_CASE = ["stem"] + [f"stage{idx}" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = version.parse("1.11" )
@property
def _snake_case ( self : Optional[int] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : Optional[int] ):
return 1e-3 | 698 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Optional[int] = logging.get_logger(__name__)
__A : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A : Tuple = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__A : Dict = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __a ( ):
SCREAMING_SNAKE_CASE = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE = bs[:]
SCREAMING_SNAKE_CASE = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
def __a ( A__ : Optional[int] ):
SCREAMING_SNAKE_CASE = set()
SCREAMING_SNAKE_CASE = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE = char
return pairs
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]="replace" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Tuple , ):
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE = bytes_to_unicode()
SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _snake_case ( self : Dict ):
return len(self.encoder )
def _snake_case ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = word
return word
def _snake_case ( self : str , __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : List[str] , __lowerCamelCase : str ):
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple ):
return self.decoder.get(__lowerCamelCase )
def _snake_case ( self : Tuple , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
SCREAMING_SNAKE_CASE = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=False , **__lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE = " " + text
return (text, kwargs)
def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def _snake_case ( self : Tuple , __lowerCamelCase : "Conversation" ):
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 698 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__A : str = logging.get_logger(__name__)
__A : Optional[Any] = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "imagegpt"
lowerCamelCase__ = ["past_key_values"]
lowerCamelCase__ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[str] , __lowerCamelCase : Any=512 + 1 , __lowerCamelCase : str=32 * 32 , __lowerCamelCase : Any=512 , __lowerCamelCase : Optional[int]=24 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]="quick_gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=1e-5 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : int=False , __lowerCamelCase : int=False , **__lowerCamelCase : Union[str, Any] , ):
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = n_positions
SCREAMING_SNAKE_CASE = n_embd
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = n_inner
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_pdrop
SCREAMING_SNAKE_CASE = embd_pdrop
SCREAMING_SNAKE_CASE = attn_pdrop
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scale_attn_weights
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE = tie_word_embeddings
super().__init__(tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@property
def _snake_case ( self : Optional[Any] ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def _snake_case ( self : Optional[int] , __lowerCamelCase : "FeatureExtractionMixin" , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 32 , ):
SCREAMING_SNAKE_CASE = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = dict(preprocessor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) )
return inputs | 698 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A : Any = logging.get_logger(__name__)
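# The helper below rewrites a single checkpoint key: it locates the block and layer
# indices that immediately precede `original_name` in the dotted key, shifts the block
# index down by `offset` (the number of patch-embedding parameters seen so far), and
# substitutes `new_name` for `original_name`.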
def __a ( A__ : List[Any] , A__ : Tuple , A__ : Optional[int] , A__ : Optional[int] ):
SCREAMING_SNAKE_CASE = original_name.split("." )[0]
SCREAMING_SNAKE_CASE = key.split("." )
SCREAMING_SNAKE_CASE = int(key_list[key_list.index(A__ ) - 2] )
SCREAMING_SNAKE_CASE = int(key_list[key_list.index(A__ ) - 1] )
SCREAMING_SNAKE_CASE = orig_block_num - offset
SCREAMING_SNAKE_CASE = key.replace(F"{orig_block_num}.{layer_num}.{original_name}" , F"block.{new_block_num}.{layer_num}.{new_name}" )
return key
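# A minimal, self-contained sketch of the same key-shifting logic with explicit
# parameter names (a hypothetical helper for illustration only; the conversion
# itself uses the function above):
def _sketch_shift_block_key(key: str, offset: int, original_name: str, new_name: str) -> str:
    parts = key.split(".")
    anchor = parts.index(original_name.split(".")[0])  # position of e.g. "mlp" or "norm1"
    block_num, layer_num = int(parts[anchor - 2]), int(parts[anchor - 1])
    # e.g. _sketch_shift_block_key("poolformer.encoder.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
    #      -> "poolformer.encoder.block.1.0.output.conv1.weight"
    return key.replace(
        f"{block_num}.{layer_num}.{original_name}", f"block.{block_num - offset}.{layer_num}.{new_name}"
    )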
def __a ( A__ : Any ):
SCREAMING_SNAKE_CASE = OrderedDict()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
SCREAMING_SNAKE_CASE = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
SCREAMING_SNAKE_CASE = key[: key.find("proj" )]
SCREAMING_SNAKE_CASE = key.replace(A__ , F"patch_embeddings.{total_embed_found}." )
SCREAMING_SNAKE_CASE = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
SCREAMING_SNAKE_CASE = "poolformer.encoder." + key
if "mlp.fc1" in key:
SCREAMING_SNAKE_CASE = replace_key_with_offset(A__ , A__ , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
SCREAMING_SNAKE_CASE = replace_key_with_offset(A__ , A__ , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
SCREAMING_SNAKE_CASE = replace_key_with_offset(A__ , A__ , "norm1" , "before_norm" )
if "norm2" in key:
SCREAMING_SNAKE_CASE = replace_key_with_offset(A__ , A__ , "norm2" , "after_norm" )
if "layer_scale_1" in key:
SCREAMING_SNAKE_CASE = replace_key_with_offset(A__ , A__ , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
SCREAMING_SNAKE_CASE = replace_key_with_offset(A__ , A__ , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
SCREAMING_SNAKE_CASE = key.replace("head" , "classifier" )
SCREAMING_SNAKE_CASE = value
return new_state_dict
def __a ( ):
SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE = Image.open(requests.get(A__ , stream=A__ ).raw )
return image
@torch.no_grad()
def __a ( A__ : List[Any] , A__ : int , A__ : int ):
SCREAMING_SNAKE_CASE = PoolFormerConfig()
# set attributes based on model_name
SCREAMING_SNAKE_CASE = "huggingface/label-files"
SCREAMING_SNAKE_CASE = model_name[-3:]
SCREAMING_SNAKE_CASE = 1000
SCREAMING_SNAKE_CASE = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE = (1, 1000)
# set config attributes
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE = {int(A__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
if size == "s12":
SCREAMING_SNAKE_CASE = [2, 2, 6, 2]
SCREAMING_SNAKE_CASE = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE = 4.0
SCREAMING_SNAKE_CASE = 0.9
elif size == "s24":
SCREAMING_SNAKE_CASE = [4, 4, 12, 4]
SCREAMING_SNAKE_CASE = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE = 4.0
SCREAMING_SNAKE_CASE = 0.9
elif size == "s36":
SCREAMING_SNAKE_CASE = [6, 6, 18, 6]
SCREAMING_SNAKE_CASE = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE = 4.0
SCREAMING_SNAKE_CASE = 1E-6
SCREAMING_SNAKE_CASE = 0.9
elif size == "m36":
SCREAMING_SNAKE_CASE = [6, 6, 18, 6]
SCREAMING_SNAKE_CASE = [96, 192, 384, 768]
SCREAMING_SNAKE_CASE = 4.0
SCREAMING_SNAKE_CASE = 1E-6
SCREAMING_SNAKE_CASE = 0.9_5
elif size == "m48":
SCREAMING_SNAKE_CASE = [8, 8, 24, 8]
SCREAMING_SNAKE_CASE = [96, 192, 384, 768]
SCREAMING_SNAKE_CASE = 4.0
SCREAMING_SNAKE_CASE = 1E-6
SCREAMING_SNAKE_CASE = 0.9_5
else:
raise ValueError(F"Size {size} not supported" )
# load image processor
SCREAMING_SNAKE_CASE = PoolFormerImageProcessor(crop_pct=A__ )
# Prepare image
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" ).pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
SCREAMING_SNAKE_CASE = torch.load(A__ , map_location=torch.device("cpu" ) )
# rename keys
SCREAMING_SNAKE_CASE = rename_keys(A__ )
# create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE = PoolFormerForImageClassification(A__ )
model.load_state_dict(A__ )
model.eval()
# forward pass
SCREAMING_SNAKE_CASE = model(A__ )
SCREAMING_SNAKE_CASE = outputs.logits
# define expected logit slices for different models
if size == "s12":
SCREAMING_SNAKE_CASE = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] )
elif size == "s24":
SCREAMING_SNAKE_CASE = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] )
elif size == "s36":
SCREAMING_SNAKE_CASE = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] )
elif size == "m36":
SCREAMING_SNAKE_CASE = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] )
elif size == "m48":
SCREAMING_SNAKE_CASE = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] )
else:
raise ValueError(F"Size {size} not supported" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , A__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
__A : Dict = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 698 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@slow
@require_torch
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" )
SCREAMING_SNAKE_CASE = bertabert.config.encoder.vocab_size
SCREAMING_SNAKE_CASE = tokenizer.sep_token_id
SCREAMING_SNAKE_CASE = tokenizer.cls_token_id
SCREAMING_SNAKE_CASE = 128
SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
SCREAMING_SNAKE_CASE = train_dataset.select(range(32 ) )
SCREAMING_SNAKE_CASE = val_dataset.select(range(16 ) )
SCREAMING_SNAKE_CASE = 4
def _map_to_encoder_decoder_inputs(__lowerCamelCase : str ):
# Tokenizer will automatically set [BOS] <text> [EOS]
SCREAMING_SNAKE_CASE = tokenizer(batch["article"] , padding="max_length" , truncation=__lowerCamelCase , max_length=512 )
SCREAMING_SNAKE_CASE = tokenizer(batch["highlights"] , padding="max_length" , truncation=__lowerCamelCase , max_length=128 )
SCREAMING_SNAKE_CASE = inputs.input_ids
SCREAMING_SNAKE_CASE = inputs.attention_mask
SCREAMING_SNAKE_CASE = outputs.input_ids
SCREAMING_SNAKE_CASE = outputs.input_ids.copy()
SCREAMING_SNAKE_CASE = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
SCREAMING_SNAKE_CASE = outputs.attention_mask
assert all(len(__lowerCamelCase ) == 512 for x in inputs.input_ids )
assert all(len(__lowerCamelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(__lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = pred.label_ids
SCREAMING_SNAKE_CASE = pred.predictions
# all unnecessary tokens are removed
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCamelCase ) )] ) / len(__lowerCamelCase )
return {"accuracy": accuracy}
# map train dataset
SCREAMING_SNAKE_CASE = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
SCREAMING_SNAKE_CASE = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = SeqaSeqTrainingArguments(
output_dir=__lowerCamelCase , per_device_train_batch_size=__lowerCamelCase , per_device_eval_batch_size=__lowerCamelCase , predict_with_generate=__lowerCamelCase , evaluation_strategy="steps" , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
SCREAMING_SNAKE_CASE = SeqaSeqTrainer(
model=__lowerCamelCase , args=__lowerCamelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , tokenizer=__lowerCamelCase , )
# start training
trainer.train() | 698 | 1 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
__A : Optional[int] = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
__A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 698 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCamelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(__lowerCamelCase , "num_attention_heads" ) )
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : int=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Tuple=[128, 256, 384] , __lowerCamelCase : int=[4, 6, 8] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : List[str]=[16, 16, 16] , __lowerCamelCase : int=0 , __lowerCamelCase : List[Any]=[2, 2, 2] , __lowerCamelCase : List[str]=[2, 2, 2] , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : int=2 , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = kernel_size
SCREAMING_SNAKE_CASE = stride
SCREAMING_SNAKE_CASE = padding
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = key_dim
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = attention_ratio
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = initializer_range
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE = LevitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
SCREAMING_SNAKE_CASE = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _snake_case ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = LevitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = LevitModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _snake_case ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self : Any ):
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _snake_case ( self : Tuple ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def _snake_case ( self : Tuple ):
pass
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def check_hidden_states_output(__lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = len(self.model_tester.depths ) + 1
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : int ):
pass
def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int=False ):
SCREAMING_SNAKE_CASE = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _snake_case ( self : List[Any] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher only supports inference, so skip it for training
if (
model_class in get_values(__lowerCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher only supports inference
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ):
SCREAMING_SNAKE_CASE = problem_type["title"]
SCREAMING_SNAKE_CASE = problem_type["num_labels"]
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
SCREAMING_SNAKE_CASE = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the PyTorch warning "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list:
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def _snake_case ( self : List[Any] ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = LevitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : Dict ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) | 698 | 1 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__A : str = 'pt'
elif is_tf_available():
__A : int = 'tf'
else:
__A : str = 'jax'
class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = PerceiverTokenizer
lowerCamelCase__ = False
def _snake_case ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _snake_case ( self : Dict ):
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
def _snake_case ( self : List[str] , **__lowerCamelCase : Optional[Any] ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def _snake_case ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=20 , __lowerCamelCase : int=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
SCREAMING_SNAKE_CASE = []
for i in range(len(__lowerCamelCase ) ):
try:
SCREAMING_SNAKE_CASE = tokenizer.decode([i] , clean_up_tokenization_spaces=__lowerCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
SCREAMING_SNAKE_CASE = list(filter(lambda __lowerCamelCase : re.match(r"^[ a-zA-Z]+$" , t[1] ) , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = list(filter(lambda __lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__lowerCamelCase ) , __lowerCamelCase ) )
if max_length is not None and len(__lowerCamelCase ) > max_length:
SCREAMING_SNAKE_CASE = toks[:max_length]
if min_length is not None and len(__lowerCamelCase ) < min_length and len(__lowerCamelCase ) > 0:
while len(__lowerCamelCase ) < min_length:
SCREAMING_SNAKE_CASE = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
if " " not in output_txt and len(__lowerCamelCase ) > 1:
SCREAMING_SNAKE_CASE = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__lowerCamelCase )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__lowerCamelCase )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE = " " + output_txt
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
return output_txt, output_ids
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE = "Unicode €."
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] , __lowerCamelCase )
# decoding
SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , "[CLS]Unicode €.[SEP]" )
SCREAMING_SNAKE_CASE = tokenizer("e è é ê ë" )
SCREAMING_SNAKE_CASE = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , __lowerCamelCase )
# decoding
SCREAMING_SNAKE_CASE = tokenizer.decode(__lowerCamelCase )
self.assertEqual(__lowerCamelCase , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
SCREAMING_SNAKE_CASE = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
if FRAMEWORK != "jax":
SCREAMING_SNAKE_CASE = list(batch.input_ids.numpy()[0] )
else:
SCREAMING_SNAKE_CASE = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE = ["A long paragraph for summarization.", "Another paragraph for summarization."]
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , __lowerCamelCase )
self.assertIn("attention_mask" , __lowerCamelCase )
self.assertNotIn("decoder_input_ids" , __lowerCamelCase )
self.assertNotIn("decoder_attention_mask" , __lowerCamelCase )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.perceiver_tokenizer
SCREAMING_SNAKE_CASE = [
"Summary of the text.",
"Another summary.",
]
SCREAMING_SNAKE_CASE = tokenizer(
text_target=__lowerCamelCase , max_length=32 , padding="max_length" , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def _snake_case ( self : int ):
# safety check on max_len default value so we are sure the test works
SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
SCREAMING_SNAKE_CASE = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = " He is very happy, UNwant\u00E9d,running"
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
SCREAMING_SNAKE_CASE = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
SCREAMING_SNAKE_CASE = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = after_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
SCREAMING_SNAKE_CASE = tokenizer.__class__.from_pretrained(__lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCamelCase )
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [f"<extra_id_{i}>" for i in range(125 )]
SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [
"an_additional_special_token"
]
SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(__lowerCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
with open(os.path.join(__lowerCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__lowerCamelCase , __lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(
__lowerCamelCase , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
SCREAMING_SNAKE_CASE = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=__lowerCamelCase )]
SCREAMING_SNAKE_CASE = tokenizer_class.from_pretrained(
__lowerCamelCase , additional_special_tokens=__lowerCamelCase , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def _snake_case ( self : Optional[Any] ):
pass
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : Optional[int] ):
pass
def _snake_case ( self : List[str] ):
pass
def _snake_case ( self : Optional[int] ):
# The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
# one-character strings and special added tokens as tokens
SCREAMING_SNAKE_CASE = self.get_tokenizers(fast=__lowerCamelCase , do_lower_case=__lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
SCREAMING_SNAKE_CASE = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_string(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) | 698 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def _snake_case ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
return None
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : int ):
return None
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _snake_case ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase )
@require_torch
@slow
def _snake_case ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase )
@require_torch
@slow
def _snake_case ( self : Optional[int] ):
from transformers import BertModel
SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(__lowerCamelCase ) )
vocab_file.flush()
SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(__lowerCamelCase ) ) )
model.save_pretrained(__lowerCamelCase )
self._test_export(__lowerCamelCase , "pt" , 12 , __lowerCamelCase )
@require_tf
@slow
def _snake_case ( self : Any ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = quantize(Path(__lowerCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def _snake_case ( self : Any ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = quantize(__lowerCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Tuple=None , **__lowerCamelCase : List[str] ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE = Path(__lowerCamelCase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
return path
except Exception as e:
self.fail(__lowerCamelCase )
@require_torch
@require_tokenizers
@slow
def _snake_case ( self : Dict ):
from transformers import BertModel
SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def _snake_case ( self : int ):
from transformers import TFBertModel
SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "tf" )
def _snake_case ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = infer_shapes(__lowerCamelCase , __lowerCamelCase )
# Assert all variables are present
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __lowerCamelCase )
self.assertSequenceEqual(variable_names[3:] , __lowerCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"]
SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , __lowerCamelCase , __lowerCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__lowerCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__lowerCamelCase ) , set(__lowerCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , __lowerCamelCase , __lowerCamelCase )
# Should have exactly one arg: everything before the argument that was not provided ("some_other_args")
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() ) | 698 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : Tuple = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "lxmert"
lowerCamelCase__ = {}
def __init__( self : Dict , __lowerCamelCase : str=30522 , __lowerCamelCase : Optional[Any]=768 , __lowerCamelCase : Any=12 , __lowerCamelCase : Any=9500 , __lowerCamelCase : Union[str, Any]=1600 , __lowerCamelCase : int=400 , __lowerCamelCase : Any=3072 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[Any]=1e-12 , __lowerCamelCase : Tuple=9 , __lowerCamelCase : Tuple=5 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : int=2048 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : Tuple=6.67 , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=True , __lowerCamelCase : int=True , __lowerCamelCase : Any=True , __lowerCamelCase : int=True , __lowerCamelCase : int=True , __lowerCamelCase : Union[str, Any]=True , **__lowerCamelCase : Tuple , ):
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = num_qa_labels
SCREAMING_SNAKE_CASE = num_object_labels
SCREAMING_SNAKE_CASE = num_attr_labels
SCREAMING_SNAKE_CASE = l_layers
SCREAMING_SNAKE_CASE = x_layers
SCREAMING_SNAKE_CASE = r_layers
SCREAMING_SNAKE_CASE = visual_feat_dim
SCREAMING_SNAKE_CASE = visual_pos_dim
SCREAMING_SNAKE_CASE = visual_loss_normalizer
SCREAMING_SNAKE_CASE = task_matched
SCREAMING_SNAKE_CASE = task_mask_lm
SCREAMING_SNAKE_CASE = task_obj_predict
SCREAMING_SNAKE_CASE = task_qa
SCREAMING_SNAKE_CASE = visual_obj_loss
SCREAMING_SNAKE_CASE = visual_attr_loss
SCREAMING_SNAKE_CASE = visual_feat_loss
SCREAMING_SNAKE_CASE = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**__lowerCamelCase ) | 698 |
from manim import *
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("CPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("GPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Model" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
rect.set_stroke(__lowerCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCamelCase , buff=0.0 )
self.add(__lowerCamelCase )
cpu_targs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , aligned_edge=__lowerCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
SCREAMING_SNAKE_CASE = MarkupText(
f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCamelCase ) , Write(__lowerCamelCase ) )
self.play(Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 )
target.move_to(__lowerCamelCase )
first_animations.append(GrowFromCenter(__lowerCamelCase , run_time=1 ) )
SCREAMING_SNAKE_CASE = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) )
self.play(*__lowerCamelCase )
self.play(*__lowerCamelCase )
self.wait() | 698 | 1 |
import argparse
__A : str = 'docs/source/_static/js/custom.js'
def __a ( A__ : Any ):
with open(A__ , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE = f.readlines()
SCREAMING_SNAKE_CASE = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion =" ):
index += 1
SCREAMING_SNAKE_CASE = F"const stableVersion = \"v{version}\"\n"
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {" ):
index += 1
# Scan forward to the closing brace of the version mapping
while not lines[index].startswith("}" ):
index += 1
# We add the new version at the end
lines[index - 1] += F" \"v{version}\": \"v{version}\",\n"
with open(A__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(A__ )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
__A : str = parser.parse_args()
update_custom_js(args.version) | 698 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def _snake_case ( *__lowerCamelCase : Any , **__lowerCamelCase : Any ):
pass
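# Minimal stand-in so this module still imports when PIL / vision extras are unavailable.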
@is_pipeline_test
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] )
# The scores are so close that floating-point error dominates, so the ordering is not guaranteed
# to be stable across Python and torch versions.
self.assertIn(
nested_simplify(__lowerCamelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
] , )
@require_tf
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
] , )
@slow
@require_torch
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , ) | 698 | 1 |
from ...configuration_utils import PretrainedConfig
__A : Tuple = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "tapas"
def __init__( self : List[Any] , __lowerCamelCase : Optional[int]=30522 , __lowerCamelCase : int=768 , __lowerCamelCase : List[str]=12 , __lowerCamelCase : List[Any]=12 , __lowerCamelCase : Dict=3072 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[int]=1024 , __lowerCamelCase : str=[3, 256, 256, 2, 256, 256, 10] , __lowerCamelCase : Tuple=0.02 , __lowerCamelCase : int=1e-12 , __lowerCamelCase : List[str]=0 , __lowerCamelCase : Union[str, Any]=10.0 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : str=1.0 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=1.0 , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : Union[str, Any]=1.0 , __lowerCamelCase : int=1.0 , __lowerCamelCase : List[str]=False , __lowerCamelCase : Any=False , __lowerCamelCase : List[str]="ratio" , __lowerCamelCase : List[str]=None , __lowerCamelCase : int=None , __lowerCamelCase : int=64 , __lowerCamelCase : Optional[Any]=32 , __lowerCamelCase : str=False , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=False , __lowerCamelCase : List[Any]=False , __lowerCamelCase : str=True , __lowerCamelCase : str=False , __lowerCamelCase : str=None , __lowerCamelCase : Dict=None , **__lowerCamelCase : Union[str, Any] , ):
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_sizes
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
# Fine-tuning task hyperparameters
SCREAMING_SNAKE_CASE = positive_label_weight
SCREAMING_SNAKE_CASE = num_aggregation_labels
SCREAMING_SNAKE_CASE = aggregation_loss_weight
SCREAMING_SNAKE_CASE = use_answer_as_supervision
SCREAMING_SNAKE_CASE = answer_loss_importance
SCREAMING_SNAKE_CASE = use_normalized_answer_loss
SCREAMING_SNAKE_CASE = huber_loss_delta
SCREAMING_SNAKE_CASE = temperature
SCREAMING_SNAKE_CASE = aggregation_temperature
SCREAMING_SNAKE_CASE = use_gumbel_for_cells
SCREAMING_SNAKE_CASE = use_gumbel_for_aggregation
SCREAMING_SNAKE_CASE = average_approximation_function
SCREAMING_SNAKE_CASE = cell_selection_preference
SCREAMING_SNAKE_CASE = answer_loss_cutoff
SCREAMING_SNAKE_CASE = max_num_rows
SCREAMING_SNAKE_CASE = max_num_columns
SCREAMING_SNAKE_CASE = average_logits_per_cell
SCREAMING_SNAKE_CASE = select_one_column
SCREAMING_SNAKE_CASE = allow_empty_column_selection
SCREAMING_SNAKE_CASE = init_cell_selection_weights_to_zero
SCREAMING_SNAKE_CASE = reset_position_index_per_cell
SCREAMING_SNAKE_CASE = disable_per_token_loss
# Aggregation hyperparameters
SCREAMING_SNAKE_CASE = aggregation_labels
SCREAMING_SNAKE_CASE = no_aggregation_label_index
if isinstance(self.aggregation_labels , __lowerCamelCase ):
            SCREAMING_SNAKE_CASE = {int(__lowerCamelCase ): v for k, v in aggregation_labels.items()}
__A : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355_818,
}
def __a ( A__ : str , A__ : str , A__ : float ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
SCREAMING_SNAKE_CASE = (
F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
F"Valid values are: {', '.join(A__ )}"
)
raise ValueError(A__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
    doctest.testmod()
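

# --- Illustrative sketch (added for clarity; not part of the original row) ---
# The converter above normalises through joules: multiply by the source
# factor, divide by the target factor. A minimal, self-contained version of
# the same pattern (unit names and factors taken from the table above):
def _convert_energy_example(from_type: str, to_type: str, value: float) -> float:
    factors = {"joule": 1.0, "kilojoule": 1_000, "wattsecond": 1.0}
    if from_type not in factors or to_type not in factors:
        raise ValueError(f"Valid values are: {', '.join(factors)}")
    return value * factors[from_type] / factors[to_type]


assert _convert_energy_example("kilojoule", "joule", 1.0) == 1_000.0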
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : int=13 , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : int=True , __lowerCamelCase : str=True , __lowerCamelCase : Optional[Any]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : int=2 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Optional[Any]=37 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : int=512 , __lowerCamelCase : Any=16 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Any=3 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : List[Any]=None , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = 13
SCREAMING_SNAKE_CASE = 7
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = 99
SCREAMING_SNAKE_CASE = 384
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = 37
SCREAMING_SNAKE_CASE = "gelu"
SCREAMING_SNAKE_CASE = 0.1
SCREAMING_SNAKE_CASE = 0.1
SCREAMING_SNAKE_CASE = 512
SCREAMING_SNAKE_CASE = 16
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 0.02
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = 128
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 9
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = None
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = TFConvBertModel(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE = [input_ids, input_mask]
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE = TFConvBertForMaskedLM(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : str , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFConvBertForSequenceClassification(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = TFConvBertForMultipleChoice(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFConvBertForTokenClassification(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = TFConvBertForQuestionAnswering(config=__lowerCamelCase )
SCREAMING_SNAKE_CASE = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def _snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
if hasattr(__lowerCamelCase , "use_cache" ):
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE = getattr(self.model_tester , "key_length" , __lowerCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = len(model(__lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase , saved_model=__lowerCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , "saved_model" , "1" )
SCREAMING_SNAKE_CASE = tf.keras.models.load_model(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE = outputs["hidden_states"]
SCREAMING_SNAKE_CASE = outputs["attentions"]
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE = getattr(self.model_tester , "key_length" , __lowerCamelCase )
SCREAMING_SNAKE_CASE = getattr(self.model_tester , "key_length" , __lowerCamelCase )
def check_decoder_attentions_output(__lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
SCREAMING_SNAKE_CASE = outputs.decoder_attentions
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_decoder_attentions_output(__lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __lowerCamelCase )
check_encoder_attentions_output(__lowerCamelCase )
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )[0]
SCREAMING_SNAKE_CASE = [1, 6, 768]
self.assertEqual(output.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 )
from collections import deque
from .hash_table import HashTable
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Optional[int] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[Any] ):
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.values[key]
def _snake_case ( self : Union[str, Any] ):
return (
sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0
):
return key
        return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase )
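

# --- Illustrative sketch (added for clarity; not part of the original row) ---
# The subclass above chains colliding values in a deque per slot (newest
# first) and only falls back to the parent's collision resolution once a
# slot already holds `charge_factor` values. The chaining idea in isolation:
from collections import deque

_slots: dict = {}


def _chained_insert_example(key: int, value: int, table_size: int = 8) -> None:
    slot = key % table_size  # hash the key to a slot index
    _slots.setdefault(slot, deque()).appendleft(value)  # newest value first


_chained_insert_example(3, 10)
_chained_insert_example(11, 20)  # 11 % 8 == 3: collides with the first key
assert list(_slots[3]) == [20, 10]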
import math
import tensorflow as tf
from packaging import version
def __a ( A__ : Any ):
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(A__ )
SCREAMING_SNAKE_CASE = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __a ( A__ : int ):
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(A__ )
SCREAMING_SNAKE_CASE = tf.cast(math.pi , x.dtype )
SCREAMING_SNAKE_CASE = tf.cast(0.0_4_4_7_1_5 , x.dtype )
SCREAMING_SNAKE_CASE = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(A__ , 3 )) ))
return x * cdf
def __a ( A__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(A__ )
return x * tf.tanh(tf.math.softplus(A__ ) )
def __a ( A__ : Tuple ):
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(A__ )
SCREAMING_SNAKE_CASE = tf.cast(0.0_4_4_7_1_5 , x.dtype )
SCREAMING_SNAKE_CASE = tf.cast(0.7_9_7_8_8_4_5_6_0_8 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __a ( A__ : Dict ):
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(A__ )
SCREAMING_SNAKE_CASE = tf.cast(1.7_0_2 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __a ( A__ : List[Any] ):
return tf.clip_by_value(_gelu(A__ ) , -10 , 10 )
def __a ( A__ : List[Any] , A__ : int=-1 ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tf.split(A__ , 2 , axis=A__ )
return a * tf.math.sigmoid(A__ )
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
def __a ( A__ : Optional[Any] ):
return tf.keras.activations.gelu(A__ , approximate=A__ )
__A : Dict = tf.keras.activations.gelu
__A : Optional[int] = approximate_gelu_wrap
else:
__A : Tuple = _gelu
__A : Any = _gelu_new
__A : List[str] = {
'gelu': gelu,
'gelu_10': gelu_aa,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
def __a ( A__ : Tuple ):
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
        raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
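

# --- Illustrative sketch (added for clarity; not part of the original row) ---
# The lookup above maps a config string (e.g. "gelu_new") to a callable that
# is applied elementwise to hidden states. Assuming TensorFlow is installed:
import tensorflow as tf

_x = tf.constant([-1.0, 0.0, 1.0])
_y = tf.keras.activations.gelu(_x)  # same family as the "gelu" entry above
assert float(_y[1]) == 0.0  # GELU(0) == 0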
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : int = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "gpt_neo"
lowerCamelCase__ = ["past_key_values"]
lowerCamelCase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : str , __lowerCamelCase : Dict=50257 , __lowerCamelCase : Tuple=2048 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : int=24 , __lowerCamelCase : int=[[["global", "local"], 12]] , __lowerCamelCase : int=16 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=256 , __lowerCamelCase : Tuple="gelu_new" , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=50256 , __lowerCamelCase : Optional[int]=50256 , **__lowerCamelCase : Dict , ):
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_layers
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = window_size
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_dropout
SCREAMING_SNAKE_CASE = embed_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = attention_types
SCREAMING_SNAKE_CASE = self.expand_attention_types_params(__lowerCamelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
f"`config.num_layers = {self.num_layers}`. "
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
@staticmethod
def _snake_case ( __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
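

# --- Illustrative sketch (added for clarity; not part of the original row) ---
# The static method above expands the compact `attention_types` spec into a
# per-layer list; e.g. [[["global", "local"], 12]] becomes 24 alternating
# entries, one per layer:
_pattern = [[["global", "local"], 12]]
_layers = []
for _item in _pattern:
    for _ in range(_item[1]):
        _layers.extend(_item[0])
assert len(_layers) == 24 and _layers[:4] == ["global", "local", "global", "local"]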
def __a ( A__ : str , A__ : List[Any] , A__ : List[str] , A__ : Union[str, Any] ):
import torch
SCREAMING_SNAKE_CASE = input.size()
SCREAMING_SNAKE_CASE = len(A__ )
SCREAMING_SNAKE_CASE = shape[dimension]
SCREAMING_SNAKE_CASE = torch.arange(0 , A__ , A__ )
SCREAMING_SNAKE_CASE = torch.div(sizedim - size , A__ , rounding_mode="floor" ) + 1
SCREAMING_SNAKE_CASE = torch.arange(A__ ) + low_indices[:min_length][:, None]
SCREAMING_SNAKE_CASE = [slice(A__ )] * rank
SCREAMING_SNAKE_CASE = indices
SCREAMING_SNAKE_CASE = input[s]
SCREAMING_SNAKE_CASE = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(A__ )
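

# --- Illustrative sketch (added for clarity; not part of the original row) ---
# The function above reimplements torch.Tensor.unfold (sliding windows along
# one dimension) with plain indexing so that it traces cleanly for ONNX
# export. Assuming PyTorch is installed, the built-in behaves as follows:
import torch

_t = torch.arange(6)           # tensor([0, 1, 2, 3, 4, 5])
_windows = _t.unfold(0, 3, 2)  # window size 3, step 2, along dim 0
assert _windows.tolist() == [[0, 1, 2], [2, 3, 4]]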
def __a ( A__ : Union[str, Any] , A__ : Optional[int] ):
import torch
SCREAMING_SNAKE_CASE = torch.arange(1 , A__ )
SCREAMING_SNAKE_CASE = torch.remainder(A__ , A__ )
SCREAMING_SNAKE_CASE = remainders == 0
SCREAMING_SNAKE_CASE = candidates[divisor_indices]
SCREAMING_SNAKE_CASE = torch.max(A__ )
return largest_divisor, torch.div(A__ , A__ , rounding_mode="floor" )
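

# --- Illustrative sketch (added for clarity; not part of the original row) ---
# The helper above returns the largest divisor of `num` found below an upper
# bound (and the corresponding quotient), used to pick an export-friendly
# chunk size. A plain-Python equivalent:
def _largest_divisor_example(num: int, upper: int) -> int:
    return max(d for d in range(1, upper) if num % d == 0)


assert _largest_divisor_example(12, 5) == 4  # divisors of 12 below 5: 1, 2, 3, 4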
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@property
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _snake_case ( self : Optional[int] ):
return self._config.num_heads
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE = super(__lowerCamelCase , self ).generate_dummy_inputs(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
# We need to order the input in the way they appears in the forward()
SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE = seqlen + 2
SCREAMING_SNAKE_CASE = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE = common_inputs["attention_mask"]
if self.use_past:
SCREAMING_SNAKE_CASE = ordered_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self : Optional[int] ):
        return 13
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = (32, 32)
SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def _snake_case ( self : List[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def _snake_case ( self : Optional[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _snake_case ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__lowerCamelCase )
@property
def _snake_case ( self : Any ):
def extract(*__lowerCamelCase : str , **__lowerCamelCase : Optional[Any] ):
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] ):
SCREAMING_SNAKE_CASE = torch.ones([0] )
def _snake_case ( self : Any , __lowerCamelCase : Union[str, Any] ):
self.pixel_values.to(__lowerCamelCase )
return self
return Out()
return extract
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.dummy_cond_unet
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe([prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.dummy_cond_unet
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe([prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=__lowerCamelCase )
assert isinstance(__lowerCamelCase , __lowerCamelCase )
assert isinstance(pipe.scheduler , __lowerCamelCase )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCamelCase )
SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(__lowerCamelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = self.dummy_cond_unet
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_vae
SCREAMING_SNAKE_CASE = self.dummy_text_encoder
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
SCREAMING_SNAKE_CASE = unet.half()
SCREAMING_SNAKE_CASE = vae.half()
SCREAMING_SNAKE_CASE = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE = StableDiffusionPipeline(
unet=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
SCREAMING_SNAKE_CASE = 4003660346
SCREAMING_SNAKE_CASE = 7
# without safety guidance (sld_guidance_scale = 0)
SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "padme amidala taking a bath artwork, safe for work, no nudity"
SCREAMING_SNAKE_CASE = 2734971755
SCREAMING_SNAKE_CASE = 7
SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
SCREAMING_SNAKE_CASE = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
SCREAMING_SNAKE_CASE = 1044355234
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , generator=__lowerCamelCase , guidance_scale=__lowerCamelCase , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A : Any = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
    __A : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
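

# --- Illustrative sketch (added for clarity; not part of the original row) ---
# _LazyModule defers the heavy framework imports declared above until an
# attribute is first accessed. The core of that pattern in plain Python:
import importlib


def _lazy_attr_example(module_name: str, attr: str):
    # import the submodule only when the attribute is actually requested
    return getattr(importlib.import_module(module_name), attr)


assert _lazy_attr_example("math", "pi") > 3.14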
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def _snake_case ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
return None
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : int ):
return None
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _snake_case ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase )
@require_torch
@slow
def _snake_case ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase )
@require_torch
@slow
def _snake_case ( self : Optional[int] ):
from transformers import BertModel
SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(__lowerCamelCase ) )
vocab_file.flush()
SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(__lowerCamelCase ) ) )
model.save_pretrained(__lowerCamelCase )
self._test_export(__lowerCamelCase , "pt" , 12 , __lowerCamelCase )
@require_tf
@slow
def _snake_case ( self : Any ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = quantize(Path(__lowerCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def _snake_case ( self : Any ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = quantize(__lowerCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Tuple=None , **__lowerCamelCase : List[str] ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE = Path(__lowerCamelCase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
return path
except Exception as e:
self.fail(__lowerCamelCase )
@require_torch
@require_tokenizers
@slow
def _snake_case ( self : Dict ):
from transformers import BertModel
SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def _snake_case ( self : int ):
from transformers import TFBertModel
SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "tf" )
def _snake_case ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = infer_shapes(__lowerCamelCase , __lowerCamelCase )
# Assert all variables are present
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __lowerCamelCase )
self.assertSequenceEqual(variable_names[3:] , __lowerCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"]
SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , __lowerCamelCase , __lowerCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__lowerCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__lowerCamelCase ) , set(__lowerCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , __lowerCamelCase , __lowerCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
        self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
import cmath
import math
def __a ( A__ : float , A__ : float , A__ : float , A__ : float ):
SCREAMING_SNAKE_CASE = math.radians(A__ )
SCREAMING_SNAKE_CASE = math.radians(A__ )
# Convert voltage and current to rectangular form
SCREAMING_SNAKE_CASE = cmath.rect(A__ , A__ )
SCREAMING_SNAKE_CASE = cmath.rect(A__ , A__ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
    doctest.testmod()
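

# --- Illustrative sketch (added for clarity; not part of the original row) ---
# As implemented above, apparent power is the complex product of the voltage
# and current phasors, each built with cmath.rect(magnitude, radians):
import cmath
import math

_v = cmath.rect(100, math.radians(30))   # 100 V at +30 degrees
_i = cmath.rect(5, math.radians(-30))    # 5 A at -30 degrees
_s = _v * _i                             # the angles cancel: S = 500 + 0j VA
assert abs(_s - 500) < 1e-9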
from manim import *
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("CPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("GPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Model" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
rect.set_stroke(__lowerCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCamelCase , buff=0.0 )
self.add(__lowerCamelCase )
cpu_targs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , aligned_edge=__lowerCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
SCREAMING_SNAKE_CASE = MarkupText(
f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCamelCase ) , Write(__lowerCamelCase ) )
self.play(Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 )
target.move_to(__lowerCamelCase )
first_animations.append(GrowFromCenter(__lowerCamelCase , run_time=1 ) )
SCREAMING_SNAKE_CASE = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) )
self.play(*__lowerCamelCase )
self.play(*__lowerCamelCase )
        self.wait()
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __a ( A__ : List[str] ):
SCREAMING_SNAKE_CASE = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def __a ( A__ : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape
SCREAMING_SNAKE_CASE = nn.Linear(A__ , A__ , bias=A__ )
SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def __a ( A__ : Tuple , A__ : Union[str, Any]=None ):
SCREAMING_SNAKE_CASE = {}
for old_key in state_dict.keys():
SCREAMING_SNAKE_CASE = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts.0" , F"ffn.experts.expert_{expert_idx}" )
else:
SCREAMING_SNAKE_CASE = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
SCREAMING_SNAKE_CASE = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
        if "fc2" in key and "experts" not in key:  # check membership on both tokens, not truthiness of "fc2"
            SCREAMING_SNAKE_CASE = key.replace(".fc2." , ".ffn.fc2." )
        if "fc1" in key and "experts" not in key:
            SCREAMING_SNAKE_CASE = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
SCREAMING_SNAKE_CASE = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
SCREAMING_SNAKE_CASE = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
SCREAMING_SNAKE_CASE = key.replace("final_layer_norm" , "ff_layer_norm" )
SCREAMING_SNAKE_CASE = state_dict[old_key]
return new_dict
def __a ( A__ : List[str] , A__ : List[Any] , A__ : str , A__ : Union[str, Any] , A__ : str = WEIGHTS_NAME ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
os.makedirs(A__ , exist_ok=A__ )
for expert in range(A__ ):
SCREAMING_SNAKE_CASE = switch_checkpoint_path + F"-rank-{expert}.pt"
if os.path.isfile(A__ ):
SCREAMING_SNAKE_CASE = torch.load(A__ )["model"]
remove_ignore_keys_(A__ )
SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ )
SCREAMING_SNAKE_CASE = os.path.join(
A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) )
torch.save(A__ , A__ )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(A__ )[0]].dtype )
    # Add the shared weights (saved as the last shard)
SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , F"-{len(A__ )+1:05d}-of-???.bin" ) )
SCREAMING_SNAKE_CASE = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(A__ )
SCREAMING_SNAKE_CASE = rename_fairseq_keys(A__ , A__ )
SCREAMING_SNAKE_CASE = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model / experts saved in the same file)
if len(A__ ) == 1:
SCREAMING_SNAKE_CASE = os.path.join(A__ , A__ )
torch.save(A__ , A__ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(A__ , A__ )
# Otherwise, let's build the index
SCREAMING_SNAKE_CASE = {}
for idx, shard in enumerate(A__ ):
SCREAMING_SNAKE_CASE = weights_name.replace(".bin" , F"-{idx+1:05d}-of-{len(A__ ):05d}.bin" )
SCREAMING_SNAKE_CASE = os.path.join(A__ , weights_name.replace(".bin" , F"-{idx+1:05d}-of-???.bin" ) )
os.rename(A__ , os.path.join(A__ , A__ ) )
for key in shard:
SCREAMING_SNAKE_CASE = shard_file
# Add the metadata
SCREAMING_SNAKE_CASE = {"total_size": total_size}
SCREAMING_SNAKE_CASE = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(A__ , A__ ) , "w" , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE = json.dumps(A__ , indent=2 , sort_keys=A__ ) + "\n"
f.write(A__ )
return metadata, index
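# The returned index follows the standard sharded-checkpoint layout consumed by
# `from_pretrained` (the shard name and size below are illustrative):
#
#     {
#         "metadata": {"total_size": 219000000000},
#         "weight_map": {"decoder.embed_tokens.weight": "pytorch_model-00129-of-00129.bin", ...}
#     }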
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
        help='Path prefix of the fairseq NLLB-MoE checkpoint shards (one file per expert rank plus a shared checkpoint).',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__A : Optional[int] = parser.parse_args()
__A , __A : Union[str, Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
__A : Any = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
__A : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path) | 698 | 1 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = StableDiffusionControlNetImgaImgPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
lowerCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self : List[str] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(__lowerCamelCase )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _snake_case ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple=0 ):
if str(__lowerCamelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__lowerCamelCase , device=torch.device(__lowerCamelCase ) , )
SCREAMING_SNAKE_CASE = floats_tensor(control_image.shape , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert("RGB" ).resize((64, 64) )
SCREAMING_SNAKE_CASE = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def _snake_case ( self : List[Any] ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _snake_case ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _snake_case ( self : Any ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
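# A minimal sketch of how the dummy components and inputs above are exercised by
# the shared pipeline tests (the device string and indexing are assumptions):
#
#     components = self.get_dummy_components()
#     pipe = self.pipeline_class(**components).to("cpu")
#     output = pipe(**self.get_dummy_inputs("cpu"))[0]   # 64x64 numpy image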
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = StableDiffusionControlNetImgaImgPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase__ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _snake_case ( self : List[Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(__lowerCamelCase : Tuple ):
            if isinstance(__lowerCamelCase , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
SCREAMING_SNAKE_CASE = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__lowerCamelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__lowerCamelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(__lowerCamelCase )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE = MultiControlNetModel([controlneta, controlneta] )
SCREAMING_SNAKE_CASE = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _snake_case ( self : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Any=0 ):
if str(__lowerCamelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__lowerCamelCase , device=torch.device(__lowerCamelCase ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__lowerCamelCase , device=torch.device(__lowerCamelCase ) , ),
]
SCREAMING_SNAKE_CASE = floats_tensor(control_image[0].shape , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert("RGB" ).resize((64, 64) )
SCREAMING_SNAKE_CASE = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = 10.0
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = steps
SCREAMING_SNAKE_CASE = scale
SCREAMING_SNAKE_CASE = pipe(**__lowerCamelCase )[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = steps
SCREAMING_SNAKE_CASE = scale
SCREAMING_SNAKE_CASE = pipe(**__lowerCamelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = steps
SCREAMING_SNAKE_CASE = scale
SCREAMING_SNAKE_CASE = pipe(**__lowerCamelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = steps
SCREAMING_SNAKE_CASE = scale
SCREAMING_SNAKE_CASE = pipe(**__lowerCamelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def _snake_case ( self : Any ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _snake_case ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _snake_case ( self : Optional[Any] ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__lowerCamelCase )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
SCREAMING_SNAKE_CASE = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=__lowerCamelCase , controlnet=__lowerCamelCase )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE = "evil space-punk bird"
SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512) )
SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512) )
SCREAMING_SNAKE_CASE = pipe(
__lowerCamelCase , __lowerCamelCase , control_image=__lowerCamelCase , generator=__lowerCamelCase , output_type="np" , num_inference_steps=50 , strength=0.6 , )
SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
SCREAMING_SNAKE_CASE = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9e-2 | 698 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = BlipImageProcessor()
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
SCREAMING_SNAKE_CASE = BlipProcessor(__lowerCamelCase , __lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self : Dict , **__lowerCamelCase : Any ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).tokenizer
def _snake_case ( self : List[Any] , **__lowerCamelCase : Optional[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase ).image_processor
def _snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def _snake_case ( self : Tuple ):
        SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
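    # The helper above yields one random RGB PIL image: the (3, 30, 400)
    # channel-first array is moved to channel-last before Image.fromarray, so
    # PIL reports size (width=400, height=30).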
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__lowerCamelCase , padding_value=1.0 )
SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCamelCase )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = image_processor(__lowerCamelCase , return_tensors="np" )
SCREAMING_SNAKE_CASE = processor(images=__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "lower newer"
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=__lowerCamelCase , images=__lowerCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) | 698 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : int = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
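# A minimal usage sketch for the config class defined below (the keyword values
# are simply its signature defaults, restated for illustration):
#
#     config = _SCREAMING_SNAKE_CASE(vocab_size=50265, hidden_size=768, approx_mode="full")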
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "mra"
def __init__( self : Union[str, Any] , __lowerCamelCase : Tuple=50265 , __lowerCamelCase : str=768 , __lowerCamelCase : List[str]=12 , __lowerCamelCase : str=12 , __lowerCamelCase : Union[str, Any]=3072 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Optional[int]=512 , __lowerCamelCase : str=1 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Optional[int]=1e-5 , __lowerCamelCase : Tuple="absolute" , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Optional[int]="full" , __lowerCamelCase : str=0 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : str=0 , __lowerCamelCase : int=2 , **__lowerCamelCase : Optional[int] , ):
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = block_per_row
SCREAMING_SNAKE_CASE = approx_mode
SCREAMING_SNAKE_CASE = initial_prior_first_n_blocks
SCREAMING_SNAKE_CASE = initial_prior_diagonal_n_blocks | 698 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def _snake_case ( *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ):
pass
def __a ( A__ : str ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__A : Tuple = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _snake_case ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model=__lowerCamelCase , tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
SCREAMING_SNAKE_CASE = "What is the placebo?"
SCREAMING_SNAKE_CASE = [
{
"image": load_image(__lowerCamelCase ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _snake_case ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = dqa_pipeline(__lowerCamelCase , top_k=2 )
self.assertEqual(
__lowerCamelCase , [
[
{"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )},
{"score": ANY(__lowerCamelCase ), "answer": ANY(__lowerCamelCase ), "start": ANY(__lowerCamelCase ), "end": ANY(__lowerCamelCase )},
]
]
* 3 , )
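    # Each call above returns, per example, a list of up to top_k answer dicts
    # of the form {"score": float, "answer": str, "start": int, "end": int},
    # where start/end index into the OCR'd word sequence.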
@require_torch
@require_detectrona
@require_pytesseract
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "How many cats are there?"
SCREAMING_SNAKE_CASE = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
        # No text is detected in this image, so LayoutLMv2 should fail and return an empty answer.
SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
        # We can optionally pass the words and bounding boxes directly
SCREAMING_SNAKE_CASE = "./tests/fixtures/tests_samples/COCO/000000039769.png"
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , words=__lowerCamelCase , boxes=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=__lowerCamelCase , revision="3dc6de3" , max_seq_len=50 , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
SCREAMING_SNAKE_CASE = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
SCREAMING_SNAKE_CASE = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , "" ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
SCREAMING_SNAKE_CASE = INVOICE_URL
SCREAMING_SNAKE_CASE = "What is the invoice number?"
SCREAMING_SNAKE_CASE = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _snake_case ( self : List[Any] ):
pass | 698 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = 42
def __init__( self : List[Any] , __lowerCamelCase : UNetaDModel , __lowerCamelCase : ScoreSdeVeScheduler ):
super().__init__()
self.register_modules(unet=__lowerCamelCase , scheduler=__lowerCamelCase )
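    # A usage sketch (the checkpoint id is an assumption; any UNet2D model
    # paired with a ScoreSdeVeScheduler will do):
    #
    #     pipe = _SCREAMING_SNAKE_CASE.from_pretrained("google/ncsnpp-celebahq-256")
    #     image = pipe(num_inference_steps=2000).images[0]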
@torch.no_grad()
def __call__( self : str , __lowerCamelCase : int = 1 , __lowerCamelCase : int = 2000 , __lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , **__lowerCamelCase : List[Any] , ):
SCREAMING_SNAKE_CASE = self.unet.config.sample_size
SCREAMING_SNAKE_CASE = (batch_size, 3, img_size, img_size)
SCREAMING_SNAKE_CASE = self.unet
SCREAMING_SNAKE_CASE = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase ) * self.scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(self.device )
self.scheduler.set_timesteps(__lowerCamelCase )
self.scheduler.set_sigmas(__lowerCamelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
SCREAMING_SNAKE_CASE = self.unet(__lowerCamelCase , __lowerCamelCase ).sample
SCREAMING_SNAKE_CASE = self.scheduler.step_correct(__lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
# prediction step
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , __lowerCamelCase ).sample
SCREAMING_SNAKE_CASE = self.scheduler.step_pred(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = output.prev_sample, output.prev_sample_mean
SCREAMING_SNAKE_CASE = sample_mean.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__lowerCamelCase ) | 698 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "facebook/bart-large-mnli"
lowerCamelCase__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
lowerCamelCase__ = "text_classifier"
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSequenceClassification
lowerCamelCase__ = ["text", ["text"]]
lowerCamelCase__ = ["text"]
def _snake_case ( self : Optional[Any] ):
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
SCREAMING_SNAKE_CASE = int(__lowerCamelCase )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
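    # A usage sketch (text and labels are illustrative; `__call__` is inherited
    # from PipelineTool and chains the encode -> forward -> decode steps below):
    #
    #     tool = _SCREAMING_SNAKE_CASE()
    #     tool("This movie was fantastic!" , ["positive", "negative"])   # -> "positive"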
def _snake_case ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(__lowerCamelCase ) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )
def _snake_case ( self : str , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id] | 698 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "blip_text_model"
def __init__( self : str , __lowerCamelCase : int=30524 , __lowerCamelCase : Any=768 , __lowerCamelCase : Optional[int]=768 , __lowerCamelCase : Dict=3072 , __lowerCamelCase : Tuple=768 , __lowerCamelCase : Optional[Any]=12 , __lowerCamelCase : Any=8 , __lowerCamelCase : Any=512 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Optional[Any]=1e-12 , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Optional[int]=30522 , __lowerCamelCase : Dict=2 , __lowerCamelCase : str=0 , __lowerCamelCase : List[Any]=102 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=True , **__lowerCamelCase : Optional[Any] , ):
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , sep_token_id=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = encoder_hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = projection_dim
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = is_decoder
SCREAMING_SNAKE_CASE = use_cache
@classmethod
def _snake_case ( cls : Tuple , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Tuple ):
cls._set_token_in_kwargs(__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
SCREAMING_SNAKE_CASE = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "blip_vision_model"
def __init__( self : Dict , __lowerCamelCase : Dict=768 , __lowerCamelCase : Optional[Any]=3072 , __lowerCamelCase : Dict=512 , __lowerCamelCase : List[str]=12 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : int=384 , __lowerCamelCase : Tuple=16 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : Dict=1e-5 , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Optional[int]=1e-10 , **__lowerCamelCase : List[str] , ):
super().__init__(**__lowerCamelCase )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = projection_dim
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def _snake_case ( cls : str , __lowerCamelCase : Union[str, os.PathLike] , **__lowerCamelCase : Any ):
cls._set_token_in_kwargs(__lowerCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
SCREAMING_SNAKE_CASE = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "blip"
lowerCamelCase__ = True
def __init__( self : Optional[Any] , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[Any]=512 , __lowerCamelCase : Union[str, Any]=2.6_592 , __lowerCamelCase : Optional[Any]=256 , **__lowerCamelCase : Optional[Any] , ):
super().__init__(**__lowerCamelCase )
if text_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
SCREAMING_SNAKE_CASE = BlipTextConfig(**__lowerCamelCase )
SCREAMING_SNAKE_CASE = BlipVisionConfig(**__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE = projection_dim
SCREAMING_SNAKE_CASE = logit_scale_init_value
SCREAMING_SNAKE_CASE = 1.0
SCREAMING_SNAKE_CASE = 0.02
SCREAMING_SNAKE_CASE = image_text_hidden_size
@classmethod
def _snake_case ( cls : Dict , __lowerCamelCase : BlipTextConfig , __lowerCamelCase : BlipVisionConfig , **__lowerCamelCase : Any ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowerCamelCase )
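    # A construction sketch combining the two sub-configs defined above (the
    # keyword values are their signature defaults, restated for illustration):
    #
    #     text_cfg = BlipTextConfig(vocab_size=30524)
    #     vision_cfg = BlipVisionConfig(image_size=384)
    #     blip_cfg = _SCREAMING_SNAKE_CASE.from_text_vision_configs(text_cfg , vision_cfg)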
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.text_config.to_dict()
SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output | 698 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __a ( A__ : str=None ):
if subparsers is not None:
SCREAMING_SNAKE_CASE = subparsers.add_parser("test" )
else:
SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=A__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
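# Typical CLI invocation (the config path is an assumption):
#
#     accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml
#
# which launches the bundled test_script.py via `accelerate-launch`, as assembled below.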
def __a ( A__ : Tuple ):
SCREAMING_SNAKE_CASE = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
SCREAMING_SNAKE_CASE = script_name
else:
SCREAMING_SNAKE_CASE = F"--config_file={args.config_file} {script_name}"
SCREAMING_SNAKE_CASE = ["accelerate-launch"] + test_args.split()
SCREAMING_SNAKE_CASE = execute_subprocess_async(A__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __a ( ):
SCREAMING_SNAKE_CASE = test_command_parser()
SCREAMING_SNAKE_CASE = parser.parse_args()
test_command(A__ )
if __name__ == "__main__":
main() | 698 | 1 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.getcwd()) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
__A : Dict = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __a ( A__ : List[str] ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
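# Examples of the inference above (model ids are illustrative):
#
#     "facebook/rag-token-nq"     -> "rag_token"
#     "facebook/rag-sequence-nq"  -> "rag_sequence"
#     "facebook/bart-large"       -> "bart"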
def __a ( A__ : str , A__ : str , A__ : List[str] ):
return max(metric_fn(A__ , A__ ) for gt in ground_truths )
def __a ( A__ : Union[str, Any] , A__ : Dict , A__ : Tuple ):
SCREAMING_SNAKE_CASE = [line.strip() for line in open(A__ , "r" ).readlines()]
SCREAMING_SNAKE_CASE = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE = pd.read_csv(A__ , sep="\t" , header=A__ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE = ast.literal_eval(A__ )
answers.append(A__ )
else:
SCREAMING_SNAKE_CASE = [line.strip() for line in open(A__ , "r" ).readlines()]
SCREAMING_SNAKE_CASE = [[reference] for reference in references]
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = 0
for prediction, ground_truths in zip(A__ , A__ ):
total += 1
em += metric_max_over_ground_truths(A__ , A__ , A__ )
fa += metric_max_over_ground_truths(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE = 1_0_0.0 * em / total
SCREAMING_SNAKE_CASE = 1_0_0.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def __a ( A__ : int , A__ : Optional[int] , A__ : Optional[int] ):
SCREAMING_SNAKE_CASE = args.k
SCREAMING_SNAKE_CASE = [line.strip() for line in open(A__ , "r" ).readlines()]
SCREAMING_SNAKE_CASE = [line.strip() for line in open(A__ , "r" ).readlines()]
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = 0
for hypo, reference in zip(A__ , A__ ):
SCREAMING_SNAKE_CASE = set(hypo.split("\t" )[:k] )
SCREAMING_SNAKE_CASE = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE = 1_0_0.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def __a ( A__ : Optional[int] , A__ : Tuple , A__ : Tuple ):
def strip_title(A__ : Optional[Any] ):
if title.startswith("\"" ):
SCREAMING_SNAKE_CASE = title[1:]
if title.endswith("\"" ):
SCREAMING_SNAKE_CASE = title[:-1]
return title
SCREAMING_SNAKE_CASE = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
A__ , return_tensors="pt" , padding=A__ , truncation=A__ , )["input_ids"].to(args.device )
SCREAMING_SNAKE_CASE = rag_model.rag.question_encoder(A__ )
SCREAMING_SNAKE_CASE = question_enc_outputs[0]
SCREAMING_SNAKE_CASE = rag_model.retriever(
        A__ , question_enc_pool_output.cpu().detach().to(torch.float32 ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
SCREAMING_SNAKE_CASE = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE = []
for docs in all_docs:
SCREAMING_SNAKE_CASE = [strip_title(A__ ) for title in docs["title"]]
provenance_strings.append("\t".join(A__ ) )
return provenance_strings
def __a ( A__ : Tuple , A__ : int , A__ : Union[str, Any] ):
with torch.no_grad():
SCREAMING_SNAKE_CASE = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
A__ , return_tensors="pt" , padding=A__ , truncation=A__ )
SCREAMING_SNAKE_CASE = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE = rag_model.generate( # rag_model overwrites generate
A__ , attention_mask=A__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=A__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
SCREAMING_SNAKE_CASE = rag_model.retriever.generator_tokenizer.batch_decode(A__ , skip_special_tokens=A__ )
if args.print_predictions:
for q, a in zip(A__ , A__ ):
logger.info("Q: {} - A: {}".format(A__ , A__ ) )
return answers
def __a ( ):
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=A__ , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=A__ , choices=["exact", "compressed", "legacy"] , type=A__ , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=A__ , type=A__ , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=A__ , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=A__ , type=A__ , required=A__ , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=A__ , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=A__ , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=A__ , type=A__ , required=A__ , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=A__ , type=A__ , required=A__ , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=A__ , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=A__ , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=A__ , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=A__ , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=A__ , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=A__ , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
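# A typical end-to-end invocation (the script and file names are assumptions):
#
#     python eval_rag.py --model_name_or_path facebook/rag-sequence-nq \
#         --evaluation_set test.questions --gold_data_path test.gold \
#         --gold_data_mode qa --eval_mode e2e --predictions_path preds.txt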
def __a ( A__ : int ):
SCREAMING_SNAKE_CASE = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
SCREAMING_SNAKE_CASE = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
SCREAMING_SNAKE_CASE = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE = args.index_path
else:
SCREAMING_SNAKE_CASE = BartForConditionalGeneration
SCREAMING_SNAKE_CASE = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , A__ )
SCREAMING_SNAKE_CASE = get_scores if args.eval_mode == "e2e" else get_precision_at_k
SCREAMING_SNAKE_CASE = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(A__ , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(A__ ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
SCREAMING_SNAKE_CASE = RagRetriever.from_pretrained(A__ , **A__ )
SCREAMING_SNAKE_CASE = model_class.from_pretrained(A__ , retriever=A__ , **A__ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE = model_class.from_pretrained(A__ , **A__ )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
SCREAMING_SNAKE_CASE = []
for line in tqdm(A__ ):
questions.append(line.strip() )
if len(A__ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE = evaluate_batch_fn(A__ , A__ , A__ )
preds_file.write("\n".join(A__ ) + "\n" )
preds_file.flush()
SCREAMING_SNAKE_CASE = []
if len(A__ ) > 0:
SCREAMING_SNAKE_CASE = evaluate_batch_fn(A__ , A__ , A__ )
preds_file.write("\n".join(A__ ) )
preds_file.flush()
score_fn(A__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__A : Tuple = get_args()
main(args) | 698 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Optional[int] = logging.get_logger(__name__)
__A : List[str] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__A : Tuple = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__A : Dict = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __a ( ):
SCREAMING_SNAKE_CASE = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE = bs[:]
SCREAMING_SNAKE_CASE = 0
for b in range(2**8 ):
if b not in bs:
bs.append(A__ )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE = [chr(A__ ) for n in cs]
return dict(zip(A__ , A__ ) )
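# As an illustration of the mapping built above: printable bytes map to
# themselves, while the remaining bytes are shifted past 255; for example,
# byte 32 (space) becomes chr(256 + 32), i.e. "Ġ", the space marker seen
# throughout byte-level BPE vocabularies.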
def __a ( A__ : Optional[int] ):
SCREAMING_SNAKE_CASE = set()
SCREAMING_SNAKE_CASE = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE = char
return pairs
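# As an illustration, the pair-extraction helper above turns the symbol tuple
# ("h", "e", "l", "l", "o") into {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")},
# i.e. the candidate bigrams that the BPE loop below ranks and merges.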
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : int , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : List[Any]="replace" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Any="<s>" , __lowerCamelCase : Optional[Any]="<unk>" , __lowerCamelCase : str="<pad>" , __lowerCamelCase : List[Any]="<mask>" , __lowerCamelCase : List[Any]=False , **__lowerCamelCase : Tuple , ):
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
with open(__lowerCamelCase , encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE = json.load(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE = bytes_to_unicode()
SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(__lowerCamelCase , encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _snake_case ( self : Dict ):
return len(self.encoder )
def _snake_case ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : Tuple , __lowerCamelCase : List[str] ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = bigram
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = word
return word
def _snake_case ( self : str , __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : List[str] , __lowerCamelCase : str ):
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Optional[int] , __lowerCamelCase : Tuple ):
return self.decoder.get(__lowerCamelCase )
def _snake_case ( self : Tuple , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = "".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + "\n" )
SCREAMING_SNAKE_CASE = 0
with open(__lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE = token_index
writer.write(" ".join(__lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self : str , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=False , **__lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE = " " + text
return (text, kwargs)
def _snake_case ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def _snake_case ( self : Tuple , __lowerCamelCase : "Conversation" ):
SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = " ".join(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.encode(__lowerCamelCase )
if len(__lowerCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE = input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
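# A minimal usage sketch, going through the public `transformers` class that
# the module above implements (this downloads the public facebook/blenderbot-3B
# vocabulary files):
from transformers import BlenderbotTokenizer

blenderbot_tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
sample_ids = blenderbot_tokenizer("Sample text").input_ids  # ends with the EOS id, per build_inputs_with_special_tokens
print(blenderbot_tokenizer.decode(sample_ids))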
from pathlib import Path
import numpy as np
from PIL import Image
def __a ( A__ : np.ndarray ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b
def __a ( A__ : np.ndarray ):
return (gray > 127) & (gray <= 255)
def __a ( A__ : np.ndarray , A__ : np.ndarray ):
SCREAMING_SNAKE_CASE = np.zeros_like(A__ )
SCREAMING_SNAKE_CASE = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
SCREAMING_SNAKE_CASE = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
SCREAMING_SNAKE_CASE = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
SCREAMING_SNAKE_CASE = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
__A : int = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
__A : List[str] = np.array(Image.open(lena_path))
# kernel to be applied
__A : Optional[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
__A : str = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
__A : List[Any] = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
from __future__ import annotations
from cmath import sqrt
def __a ( A__ : int , A__ : int , A__ : int ):
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
SCREAMING_SNAKE_CASE = b * b - 4 * a * c
SCREAMING_SNAKE_CASE = (-b + sqrt(A__ )) / (2 * a)
SCREAMING_SNAKE_CASE = (-b - sqrt(A__ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
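# For example, quadratic_roots(a=1, b=-2, c=1) has a zero discriminant and
# returns the repeated real root (1.0, 1.0); a negative discriminant yields a
# pair of complex conjugate roots instead.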
def __a ( ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = quadratic_roots(a=5 , b=6 , c=1 )
print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
    main()
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int] ):
SCREAMING_SNAKE_CASE = {}
def _snake_case ( self : List[str] ):
print(self.vertex )
for i in self.vertex:
print(__lowerCamelCase , " -> " , " -> ".join([str(__lowerCamelCase ) for j in self.vertex[i]] ) )
def _snake_case ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
        # check if vertex is already present
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__lowerCamelCase )
else:
# else make a new vertex
SCREAMING_SNAKE_CASE = [to_vertex]
def _snake_case ( self : Optional[int] ):
# visited array for storing already visited nodes
SCREAMING_SNAKE_CASE = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : int , __lowerCamelCase : int , __lowerCamelCase : list ):
# mark start vertex as visited
SCREAMING_SNAKE_CASE = True
print(__lowerCamelCase , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
__A : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
    # 0 1 2 3
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE = cs.out[:-1]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.decode(greedy_ids[0] )
SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase )
thread.start()
SCREAMING_SNAKE_CASE = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase )
SCREAMING_SNAKE_CASE = greedy_ids[:, input_ids.shape[1] :]
SCREAMING_SNAKE_CASE = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_prompt=__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=10 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE = cs.out[:-1]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
def _snake_case ( self : int ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("distilgpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = torch.ones((1, 5) , device=__lowerCamelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE = TextStreamer(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
model.generate(__lowerCamelCase , max_new_tokens=1 , do_sample=__lowerCamelCase , streamer=__lowerCamelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
SCREAMING_SNAKE_CASE = cs.out[:-1] # Remove the final "\n"
SCREAMING_SNAKE_CASE = tokenizer(__lowerCamelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = TextIteratorStreamer(__lowerCamelCase , timeout=0.001 )
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
SCREAMING_SNAKE_CASE = Thread(target=model.generate , kwargs=__lowerCamelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = ""
for new_text in streamer:
                streamer_text += new_text
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__A : List[Any] = datasets.utils.logging.get_logger(__name__)
__A : Optional[Any] = ['names', 'prefix']
__A : Optional[Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
__A : Optional[Any] = ['encoding_errors', 'on_bad_lines']
__A : str = ['date_format']
@dataclass
class _SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ = ","
lowerCamelCase__ = None
lowerCamelCase__ = "infer"
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = True
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = False
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = None
lowerCamelCase__ = "."
lowerCamelCase__ = None
lowerCamelCase__ = '"'
lowerCamelCase__ = 0
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = 0
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = None
lowerCamelCase__ = 1_0_0_0_0
lowerCamelCase__ = None
lowerCamelCase__ = "strict"
lowerCamelCase__ = "error"
lowerCamelCase__ = None
def _snake_case ( self : int ):
if self.delimiter is not None:
SCREAMING_SNAKE_CASE = self.delimiter
if self.column_names is not None:
SCREAMING_SNAKE_CASE = self.column_names
@property
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __lowerCamelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ = CsvConfig
def _snake_case ( self : Tuple ):
return datasets.DatasetInfo(features=self.config.features )
def _snake_case ( self : int , __lowerCamelCase : int ):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCamelCase , (str, list, tuple) ):
SCREAMING_SNAKE_CASE = data_files
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = [files]
SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
SCREAMING_SNAKE_CASE = []
for split_name, files in data_files.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = [files]
SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCamelCase , gen_kwargs={"files": files} ) )
return splits
def _snake_case ( self : str , __lowerCamelCase : pa.Table ):
if self.config.features is not None:
SCREAMING_SNAKE_CASE = self.config.features.arrow_schema
if all(not require_storage_cast(__lowerCamelCase ) for feature in self.config.features.values() ):
# cheaper cast
SCREAMING_SNAKE_CASE = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__lowerCamelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE = table_cast(__lowerCamelCase , __lowerCamelCase )
return pa_table
def _snake_case ( self : str , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
SCREAMING_SNAKE_CASE = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(__lowerCamelCase ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCamelCase ) ):
SCREAMING_SNAKE_CASE = pd.read_csv(__lowerCamelCase , iterator=__lowerCamelCase , dtype=__lowerCamelCase , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = pa.Table.from_pandas(__lowerCamelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__lowerCamelCase )
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(__lowerCamelCase )}: {e}" )
                raise
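# A minimal end-to-end sketch of how this builder is reached in practice,
# through the public `datasets` API (writes a throwaway CSV so the example is
# self-contained):
import csv
import tempfile

from datasets import load_dataset

with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False, newline="") as tmp_csv:
    csv.writer(tmp_csv).writerows([["a", "b"], [1, 2], [3, 4]])
csv_dataset = load_dataset("csv", data_files=tmp_csv.name)
print(csv_dataset["train"][0])  # {'a': 1, 'b': 2}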
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Union[str, Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
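# A minimal sketch of the lazy-import mechanism above in action: importing a
# symbol listed in _import_structure resolves the real module only on first
# access (tiny, arbitrary config values; random weights, no download):
from transformers import FNetConfig, FNetModel

fnet_model = FNetModel(FNetConfig(num_hidden_layers=2, hidden_size=64, intermediate_size=128))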
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__A : int = TypeVar('KT')
__A : List[str] = TypeVar('VT')
class _SCREAMING_SNAKE_CASE ( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : KT | str = "root" , __lowerCamelCase : VT | None = None ):
SCREAMING_SNAKE_CASE = key
SCREAMING_SNAKE_CASE = value
SCREAMING_SNAKE_CASE = []
def __repr__( self : Optional[Any] ):
return f"Node({self.key}: {self.value})"
@property
def _snake_case ( self : Optional[Any] ):
return len(self.forward )
class _SCREAMING_SNAKE_CASE ( Generic[KT, VT] ):
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : float = 0.5 , __lowerCamelCase : int = 16 ):
SCREAMING_SNAKE_CASE = Node[KT, VT]()
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = p
SCREAMING_SNAKE_CASE = max_level
def __str__( self : int ):
SCREAMING_SNAKE_CASE = list(self )
if len(__lowerCamelCase ) == 0:
return f"SkipList(level={self.level})"
SCREAMING_SNAKE_CASE = max((len(str(__lowerCamelCase ) ) for item in items) , default=4 )
SCREAMING_SNAKE_CASE = max(__lowerCamelCase , 4 ) + 4
SCREAMING_SNAKE_CASE = self.head
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = node.forward.copy()
lines.append(f"[{node.key}]".ljust(__lowerCamelCase , "-" ) + "* " * len(__lowerCamelCase ) )
lines.append(" " * label_size + "| " * len(__lowerCamelCase ) )
while len(node.forward ) != 0:
SCREAMING_SNAKE_CASE = node.forward[0]
lines.append(
f"[{node.key}]".ljust(__lowerCamelCase , "-" )
+ " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
lines.append(" " * label_size + "| " * len(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE = node.forward
lines.append("None".ljust(__lowerCamelCase ) + "* " * len(__lowerCamelCase ) )
return f"SkipList(level={self.level})\n" + "\n".join(__lowerCamelCase )
def __iter__( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
SCREAMING_SNAKE_CASE = node.forward[0]
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def _snake_case ( self : int , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = self.head
for i in reversed(range(self.level ) ):
            # i < node.level - When the node's level is less than `i`, decrement `i`.
            # node.forward[i].key < key - Jumping to a node whose key is greater
            #                             than or equal to the searched key would
            #                             skip over the searched key.
while i < node.level and node.forward[i].key < key:
SCREAMING_SNAKE_CASE = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__lowerCamelCase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def _snake_case ( self : List[str] , __lowerCamelCase : KT ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._locate_node(__lowerCamelCase )
if node is not None:
for i, update_node in enumerate(__lowerCamelCase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
SCREAMING_SNAKE_CASE = node.forward[i]
else:
SCREAMING_SNAKE_CASE = update_node.forward[:i]
def _snake_case ( self : Optional[int] , __lowerCamelCase : KT , __lowerCamelCase : VT ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._locate_node(__lowerCamelCase )
if node is not None:
SCREAMING_SNAKE_CASE = value
else:
SCREAMING_SNAKE_CASE = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __lowerCamelCase ):
update_vector.append(self.head )
SCREAMING_SNAKE_CASE = level
SCREAMING_SNAKE_CASE = Node(__lowerCamelCase , __lowerCamelCase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE = new_node
def _snake_case ( self : int , __lowerCamelCase : VT ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._locate_node(__lowerCamelCase )
if node is not None:
return node.value
return None
def __a ( ):
SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("Key1" , 3 )
skip_list.insert("Key2" , 12 )
skip_list.insert("Key3" , 41 )
skip_list.insert("Key4" , -19 )
SCREAMING_SNAKE_CASE = skip_list.head
SCREAMING_SNAKE_CASE = {}
while node.level != 0:
SCREAMING_SNAKE_CASE = node.forward[0]
SCREAMING_SNAKE_CASE = node.value
assert len(A__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def __a ( ):
SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("Key1" , 10 )
skip_list.insert("Key1" , 12 )
skip_list.insert("Key5" , 7 )
skip_list.insert("Key7" , 10 )
skip_list.insert("Key10" , 5 )
skip_list.insert("Key7" , 7 )
skip_list.insert("Key5" , 5 )
skip_list.insert("Key10" , 10 )
SCREAMING_SNAKE_CASE = skip_list.head
SCREAMING_SNAKE_CASE = {}
while node.level != 0:
SCREAMING_SNAKE_CASE = node.forward[0]
SCREAMING_SNAKE_CASE = node.value
if len(A__ ) != 4:
print()
assert len(A__ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def __a ( ):
SCREAMING_SNAKE_CASE = SkipList()
assert skip_list.find("Some key" ) is None
def __a ( ):
SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("Key2" , 20 )
assert skip_list.find("Key2" ) == 20
skip_list.insert("Some Key" , 10 )
skip_list.insert("Key2" , 8 )
skip_list.insert("V" , 13 )
assert skip_list.find("Y" ) is None
assert skip_list.find("Key2" ) == 8
assert skip_list.find("Some Key" ) == 10
assert skip_list.find("V" ) == 13
def __a ( ):
SCREAMING_SNAKE_CASE = SkipList()
skip_list.delete("Some key" )
assert len(skip_list.head.forward ) == 0
def __a ( ):
SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("Key2" ) is None
def __a ( ):
SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) == 14
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("X" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key1" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) is None
def __a ( ):
SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 142 )
skip_list.insert("Key2" , 15 )
skip_list.delete("X" )
def traverse_keys(A__ : Dict ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(A__ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __a ( ):
def is_sorted(A__ : Dict ):
return all(next_item >= item for item, next_item in zip(A__ , lst[1:] ) )
SCREAMING_SNAKE_CASE = SkipList()
for i in range(10 ):
skip_list.insert(A__ , A__ )
assert is_sorted(list(A__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(A__ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(A__ ) )
def __a ( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __a ( ):
SCREAMING_SNAKE_CASE = SkipList()
skip_list.insert(2 , "2" )
skip_list.insert(4 , "4" )
skip_list.insert(6 , "4" )
skip_list.insert(4 , "5" )
skip_list.insert(8 , "4" )
skip_list.insert(9 , "4" )
skip_list.delete(4 )
print(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
__A : Optional[Any] = datasets.load_iris()
__A : Optional[Any] = np.array(data['data'])
__A : Optional[int] = np.array(data['target'])
__A : Union[str, Any] = data['target_names']
__A , __A , __A , __A : Optional[int] = train_test_split(X, y)
def __a ( A__ : Optional[int] , A__ : Dict ):
return np.linalg.norm(np.array(A__ ) - np.array(A__ ) )
def __a ( A__ : Optional[Any] , A__ : int , A__ : Dict , A__ : Optional[Any] , A__ : Dict=5 ):
SCREAMING_SNAKE_CASE = zip(A__ , A__ )
# List of distances of all points from the point to be classified
SCREAMING_SNAKE_CASE = []
for data_point in data:
SCREAMING_SNAKE_CASE = euclidean_distance(data_point[0] , A__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
SCREAMING_SNAKE_CASE = [i[1] for i in sorted(A__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
SCREAMING_SNAKE_CASE = Counter(A__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
__A : Optional[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
__A : Optional[Any] = ['a', 'b', 'c', 'd', 'e']
def __a ( A__ : Dict , A__ : List[Any] , A__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = start
# add current to visited
visited.append(A__ )
SCREAMING_SNAKE_CASE = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
SCREAMING_SNAKE_CASE = topological_sort(A__ , A__ , A__ )
# if all neighbors visited add current to sort
sort.append(A__ )
# if all vertices haven't been visited select a new one to visit
if len(A__ ) != len(A__ ):
for vertice in vertices:
if vertice not in visited:
SCREAMING_SNAKE_CASE = topological_sort(A__ , A__ , A__ )
# return sort
return sort
if __name__ == "__main__":
__A : Any = topological_sort('a', [], [])
    print(sort)
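# For the graph above this prints ['c', 'd', 'e', 'b', 'a']: each vertex is
# appended only after all of its outgoing neighbours, so dependencies come first.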
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Dict = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "resnet"
lowerCamelCase__ = ["basic", "bottleneck"]
def __init__( self : Optional[Any] , __lowerCamelCase : int=3 , __lowerCamelCase : Dict=64 , __lowerCamelCase : str=[256, 512, 1024, 2048] , __lowerCamelCase : str=[3, 4, 6, 3] , __lowerCamelCase : Optional[int]="bottleneck" , __lowerCamelCase : int="relu" , __lowerCamelCase : List[str]=False , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , **__lowerCamelCase : Dict , ):
super().__init__(**__lowerCamelCase )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = embedding_size
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = layer_type
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = downsample_in_first_stage
SCREAMING_SNAKE_CASE = ["stem"] + [f"stage{idx}" for idx in range(1 , len(__lowerCamelCase ) + 1 )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = version.parse("1.11" )
@property
def _snake_case ( self : Optional[int] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : Optional[int] ):
        return 1e-3
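# A minimal sketch pairing this configuration with its model class from the
# public `transformers` API (a small "basic" variant for illustration; random
# weights, no download):
from transformers import ResNetConfig, ResNetModel

resnet_model = ResNetModel(ResNetConfig(depths=[2, 2, 2], hidden_sizes=[64, 128, 256], layer_type="basic"))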
from __future__ import annotations
from collections import Counter
from random import random
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] ):
SCREAMING_SNAKE_CASE = {}
def _snake_case ( self : str , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = {}
def _snake_case ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : float ):
if nodea not in self.connections:
self.add_node(__lowerCamelCase )
if nodea not in self.connections:
self.add_node(__lowerCamelCase )
SCREAMING_SNAKE_CASE = probability
def _snake_case ( self : Optional[Any] ):
return list(self.connections )
def _snake_case ( self : Tuple , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def __a ( A__ : str , A__ : list[tuple[str, str, float]] , A__ : int ):
SCREAMING_SNAKE_CASE = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(A__ , A__ , A__ )
SCREAMING_SNAKE_CASE = Counter(graph.get_nodes() )
SCREAMING_SNAKE_CASE = start
for _ in range(A__ ):
SCREAMING_SNAKE_CASE = graph.transition(A__ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
    doctest.testmod()
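# As an illustration, a two-state chain such as
#   [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
# run for many steps from "a" yields visit counts approaching the stationary
# distribution, roughly 5:1 in favour of "a" for these probabilities.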
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__A : str = logging.get_logger(__name__)
__A : Optional[Any] = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "imagegpt"
lowerCamelCase__ = ["past_key_values"]
lowerCamelCase__ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[str] , __lowerCamelCase : Any=512 + 1 , __lowerCamelCase : str=32 * 32 , __lowerCamelCase : Any=512 , __lowerCamelCase : Optional[int]=24 , __lowerCamelCase : Tuple=8 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]="quick_gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=1e-5 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : List[Any]=False , __lowerCamelCase : int=False , __lowerCamelCase : int=False , **__lowerCamelCase : Union[str, Any] , ):
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = n_positions
SCREAMING_SNAKE_CASE = n_embd
SCREAMING_SNAKE_CASE = n_layer
SCREAMING_SNAKE_CASE = n_head
SCREAMING_SNAKE_CASE = n_inner
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_pdrop
SCREAMING_SNAKE_CASE = embd_pdrop
SCREAMING_SNAKE_CASE = attn_pdrop
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scale_attn_weights
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE = tie_word_embeddings
super().__init__(tie_word_embeddings=__lowerCamelCase , **__lowerCamelCase )
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@property
def _snake_case ( self : Optional[Any] ):
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def _snake_case ( self : Optional[int] , __lowerCamelCase : "FeatureExtractionMixin" , __lowerCamelCase : int = 1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 32 , __lowerCamelCase : int = 32 , ):
SCREAMING_SNAKE_CASE = self._generate_dummy_images(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = dict(preprocessor(images=__lowerCamelCase , return_tensors=__lowerCamelCase ) )
        return inputs
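# A minimal sketch pairing this configuration with the public model class it
# configures (tiny, arbitrary sizes; random weights, no download):
from transformers import ImageGPTConfig, ImageGPTModel

imagegpt_model = ImageGPTModel(ImageGPTConfig(n_embd=32, n_layer=2, n_head=2))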
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
SCREAMING_SNAKE_CASE = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
SCREAMING_SNAKE_CASE = "xvjiarui/stable-diffusion-2-inpainting"
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionInpaintPipeline.from_pretrained(__lowerCamelCase , safety_checker=__lowerCamelCase )
SCREAMING_SNAKE_CASE = "Face of a yellow cat, high resolution, sitting on a park bench"
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = num_samples * [init_image]
SCREAMING_SNAKE_CASE = num_samples * [mask_image]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(__lowerCamelCase )
SCREAMING_SNAKE_CASE = jax.random.split(__lowerCamelCase , jax.device_count() )
SCREAMING_SNAKE_CASE = shard(__lowerCamelCase )
SCREAMING_SNAKE_CASE = shard(__lowerCamelCase )
SCREAMING_SNAKE_CASE = shard(__lowerCamelCase )
SCREAMING_SNAKE_CASE = pipeline(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , jit=__lowerCamelCase )
SCREAMING_SNAKE_CASE = output.images.reshape(__lowerCamelCase , 512 , 512 , 3 )
SCREAMING_SNAKE_CASE = images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE = jnp.array(
[0.3_611_307, 0.37_649_736, 0.3_757_408, 0.38_213_953, 0.39_295_167, 0.3_841_631, 0.41_554_978, 0.4_137_475, 0.4_217_084] )
print(f"output_slice: {output_slice}" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@slow
@require_torch
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" )
SCREAMING_SNAKE_CASE = bertabert.config.encoder.vocab_size
SCREAMING_SNAKE_CASE = tokenizer.sep_token_id
SCREAMING_SNAKE_CASE = tokenizer.cls_token_id
SCREAMING_SNAKE_CASE = 128
SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
SCREAMING_SNAKE_CASE = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
SCREAMING_SNAKE_CASE = train_dataset.select(range(32 ) )
SCREAMING_SNAKE_CASE = val_dataset.select(range(16 ) )
SCREAMING_SNAKE_CASE = 4
def _map_to_encoder_decoder_inputs(__lowerCamelCase : str ):
# Tokenizer will automatically set [BOS] <text> [EOS]
SCREAMING_SNAKE_CASE = tokenizer(batch["article"] , padding="max_length" , truncation=__lowerCamelCase , max_length=512 )
SCREAMING_SNAKE_CASE = tokenizer(batch["highlights"] , padding="max_length" , truncation=__lowerCamelCase , max_length=128 )
SCREAMING_SNAKE_CASE = inputs.input_ids
SCREAMING_SNAKE_CASE = inputs.attention_mask
SCREAMING_SNAKE_CASE = outputs.input_ids
SCREAMING_SNAKE_CASE = outputs.input_ids.copy()
SCREAMING_SNAKE_CASE = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
SCREAMING_SNAKE_CASE = outputs.attention_mask
assert all(len(__lowerCamelCase ) == 512 for x in inputs.input_ids )
assert all(len(__lowerCamelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(__lowerCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = pred.label_ids
SCREAMING_SNAKE_CASE = pred.predictions
# all unnecessary tokens are removed
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
SCREAMING_SNAKE_CASE = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__lowerCamelCase ) )] ) / len(__lowerCamelCase )
return {"accuracy": accuracy}
# map train dataset
SCREAMING_SNAKE_CASE = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
SCREAMING_SNAKE_CASE = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__lowerCamelCase , batch_size=__lowerCamelCase , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = SeqaSeqTrainingArguments(
output_dir=__lowerCamelCase , per_device_train_batch_size=__lowerCamelCase , per_device_eval_batch_size=__lowerCamelCase , predict_with_generate=__lowerCamelCase , evaluation_strategy="steps" , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
SCREAMING_SNAKE_CASE = SeqaSeqTrainer(
model=__lowerCamelCase , args=__lowerCamelCase , compute_metrics=_compute_metrics , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , tokenizer=__lowerCamelCase , )
# start training
        trainer.train()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A : Union[str, Any] = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
__A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
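# A short translation sketch using the classes the import table above exposes
# (downloads the public facebook/m2m100_418M checkpoint):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

m2m_tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
m2m_model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
m2m_tokenizer.src_lang = "en"
encoded = m2m_tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = m2m_model.generate(**encoded, forced_bos_token_id=m2m_tokenizer.get_lang_id("fr"))
print(m2m_tokenizer.batch_decode(generated, skip_special_tokens=True))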
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCamelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(__lowerCamelCase , "num_attention_heads" ) )
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : int=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]=16 , __lowerCamelCase : Tuple=[128, 256, 384] , __lowerCamelCase : int=[4, 6, 8] , __lowerCamelCase : Tuple=[2, 3, 4] , __lowerCamelCase : List[str]=[16, 16, 16] , __lowerCamelCase : int=0 , __lowerCamelCase : List[Any]=[2, 2, 2] , __lowerCamelCase : List[str]=[2, 2, 2] , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : Any=True , __lowerCamelCase : Dict=True , __lowerCamelCase : int=2 , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = kernel_size
SCREAMING_SNAKE_CASE = stride
SCREAMING_SNAKE_CASE = padding
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = key_dim
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = attention_ratio
SCREAMING_SNAKE_CASE = mlp_ratio
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = initializer_range
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Any ):
SCREAMING_SNAKE_CASE = LevitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
SCREAMING_SNAKE_CASE = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _snake_case ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = LevitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
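# Illustrative sketch (assumed helper, not part of the test suite): the shape
# assertions in the tester above repeatedly apply the standard convolution
# output-size formula, floor((size + 2*pad - kernel)/stride) + 1, once per
# stride-2 convolution in Levit's patch embedding. With the tester defaults
# (image_size=64, kernel_size=3, stride=2, padding=1) four passes shrink
# 64 -> 32 -> 16 -> 8 -> 4.
from math import floor

def conv_output_size(size: int, kernel: int = 3, stride: int = 2, padding: int = 1) -> int:
    return floor((size + 2 * padding - kernel) / stride) + 1

size = 64
for _ in range(4):
    size = conv_output_size(size)
print(size)  # 4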
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = LevitModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
def _snake_case ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self : Any ):
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _snake_case ( self : Tuple ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def _snake_case ( self : Tuple ):
pass
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _snake_case ( self : List[Any] ):
def check_hidden_states_output(__lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = len(self.model_tester.depths ) + 1
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : int ):
pass
def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : int=False ):
SCREAMING_SNAKE_CASE = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def _snake_case ( self : List[Any] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ):
SCREAMING_SNAKE_CASE = problem_type["title"]
SCREAMING_SNAKE_CASE = problem_type["num_labels"]
SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
SCREAMING_SNAKE_CASE = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
SCREAMING_SNAKE_CASE = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCamelCase ) as warning_list:
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"Something is going wrong in the regression problem: intercepted {w.message}" )
loss.backward()
@slow
def _snake_case ( self : List[Any] ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = LevitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : Dict ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) ) | 698 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None # sigma(t_i)
@classmethod
def _snake_case ( cls : Tuple ):
return cls()
@dataclass
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ):
'''simple docstring'''
@property
def _snake_case ( self : List[Any] ):
return True
@register_to_config
def __init__( self : int , __lowerCamelCase : float = 0.02 , __lowerCamelCase : float = 100 , __lowerCamelCase : float = 1.007 , __lowerCamelCase : float = 80 , __lowerCamelCase : float = 0.05 , __lowerCamelCase : float = 50 , ):
pass
def _snake_case ( self : Optional[int] ):
return KarrasVeSchedulerState.create()
def _snake_case ( self : str , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : int , __lowerCamelCase : Tuple = () ):
SCREAMING_SNAKE_CASE = jnp.arange(0 , __lowerCamelCase )[::-1].copy()
SCREAMING_SNAKE_CASE = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__lowerCamelCase , schedule=jnp.array(__lowerCamelCase , dtype=jnp.floataa ) , timesteps=__lowerCamelCase , )
def _snake_case ( self : Dict , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : float , __lowerCamelCase : random.KeyArray , ):
if self.config.s_min <= sigma <= self.config.s_max:
SCREAMING_SNAKE_CASE = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
SCREAMING_SNAKE_CASE = 0
# sample eps ~ N(0, S_noise^2 * I)
SCREAMING_SNAKE_CASE = random.split(__lowerCamelCase , num=1 )
SCREAMING_SNAKE_CASE = self.config.s_noise * random.normal(key=__lowerCamelCase , shape=sample.shape )
SCREAMING_SNAKE_CASE = sigma + gamma * sigma
SCREAMING_SNAKE_CASE = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def _snake_case ( self : Dict , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : bool = True , ):
SCREAMING_SNAKE_CASE = sample_hat + sigma_hat * model_output
SCREAMING_SNAKE_CASE = (sample_hat - pred_original_sample) / sigma_hat
SCREAMING_SNAKE_CASE = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__lowerCamelCase , derivative=__lowerCamelCase , state=__lowerCamelCase )
def _snake_case ( self : Optional[Any] , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : float , __lowerCamelCase : float , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : jnp.ndarray , __lowerCamelCase : bool = True , ):
SCREAMING_SNAKE_CASE = sample_prev + sigma_prev * model_output
SCREAMING_SNAKE_CASE = (sample_prev - pred_original_sample) / sigma_prev
SCREAMING_SNAKE_CASE = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__lowerCamelCase , derivative=__lowerCamelCase , state=__lowerCamelCase )
def _snake_case ( self : Dict , __lowerCamelCase : KarrasVeSchedulerState , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ):
raise NotImplementedError() | 698 |
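# Standalone NumPy sketch of the schedule built in set_timesteps above: for
# each reversed timestep i it evaluates
#     sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)),
# a geometric interpolation between sigma_max**2 (i = 0) and sigma_min**2
# (i = N - 1). sigma_min=0.02 and sigma_max=100 are the config defaults in
# this file; N=5 is an arbitrary illustrative step count.
import numpy as np

sigma_min, sigma_max, N = 0.02, 100.0, 5
timesteps = np.arange(0, N)[::-1]
schedule = np.array([sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)) for i in timesteps])
print(timesteps)  # [4 3 2 1 0]
print(schedule)   # schedule[k] is the noise level paired with timesteps[k]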
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def _snake_case ( self : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
return None
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : int ):
return None
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _snake_case ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase )
@require_torch
@slow
def _snake_case ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase )
@require_torch
@slow
def _snake_case ( self : Optional[int] ):
from transformers import BertModel
SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(__lowerCamelCase ) )
vocab_file.flush()
SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(__lowerCamelCase ) ) )
model.save_pretrained(__lowerCamelCase )
self._test_export(__lowerCamelCase , "pt" , 12 , __lowerCamelCase )
@require_tf
@slow
def _snake_case ( self : Any ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "tf" , 12 , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = quantize(Path(__lowerCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def _snake_case ( self : Any ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
SCREAMING_SNAKE_CASE = self._test_export(__lowerCamelCase , "pt" , 12 , **__lowerCamelCase )
SCREAMING_SNAKE_CASE = quantize(__lowerCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__lowerCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def _snake_case ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : Tuple=None , **__lowerCamelCase : List[str] ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE = Path(__lowerCamelCase ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
return path
except Exception as e:
self.fail(__lowerCamelCase )
@require_torch
@require_tokenizers
@slow
def _snake_case ( self : Dict ):
from transformers import BertModel
SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def _snake_case ( self : int ):
from transformers import TFBertModel
SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__lowerCamelCase , __lowerCamelCase , "tf" )
def _snake_case ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ):
SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = infer_shapes(__lowerCamelCase , __lowerCamelCase )
# Assert all variables are present
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __lowerCamelCase )
self.assertSequenceEqual(variable_names[3:] , __lowerCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"]
SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , __lowerCamelCase , __lowerCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__lowerCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__lowerCamelCase ) , set(__lowerCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__lowerCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
        # Generated args are interleaved with other args (for instance the parameter "past" in GPT2)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , __lowerCamelCase , __lowerCamelCase )
        # Should have exactly one arg (everything before the arg that was not provided, "some_other_args")
self.assertEqual(len(__lowerCamelCase ) , 1 )
self.assertEqual(len(__lowerCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() ) | 698 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = IFInpaintingSuperResolutionPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {"latents"}
def _snake_case ( self : Tuple ):
return self._get_superresolution_dummy_components()
def _snake_case ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : int=0 ):
if str(__lowerCamelCase ).startswith("mps" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 16, 16) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _snake_case ( self : Optional[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def _snake_case ( self : int ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def _snake_case ( self : Optional[int] ):
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _snake_case ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _snake_case ( self : List[Any] ):
self._test_save_load_local()
def _snake_case ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , ) | 698 |
from manim import *
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("CPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("GPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Model" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCamelCase )
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
rect.set_stroke(__lowerCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__lowerCamelCase , buff=0.0 )
self.add(__lowerCamelCase )
cpu_targs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 )
SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , aligned_edge=__lowerCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
SCREAMING_SNAKE_CASE = MarkupText(
f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCamelCase ) , Write(__lowerCamelCase ) )
self.play(Write(__lowerCamelCase , run_time=1 ) , Create(__lowerCamelCase , run_time=1 ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = fill.copy().set_fill(__lowerCamelCase , opacity=0.7 )
target.move_to(__lowerCamelCase )
first_animations.append(GrowFromCenter(__lowerCamelCase , run_time=1 ) )
SCREAMING_SNAKE_CASE = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__lowerCamelCase , run_time=1.5 ) )
self.play(*__lowerCamelCase )
self.play(*__lowerCamelCase )
self.wait() | 698 | 1 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __a ( A__ : List[Any] , A__ : List[Any] , A__ : Optional[int]=0 ):
# Format the message.
if name is None:
SCREAMING_SNAKE_CASE = None
else:
SCREAMING_SNAKE_CASE = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
SCREAMING_SNAKE_CASE = fmt.format(A__ )
# Print and recurse (if needed).
if isinstance(A__ , A__ ):
if msg is not None:
print(A__ )
for k in val.keys():
recursive_print(A__ , val[k] , spaces + 2 )
elif isinstance(A__ , torch.Tensor ):
print(A__ , ":" , val.size() )
else:
print(A__ , ":" , A__ )
def __a ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : Dict , A__ : List[str] , A__ : List[Any] ):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
SCREAMING_SNAKE_CASE = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
SCREAMING_SNAKE_CASE = (num_heads, hidden_size, num_splits) + input_shape[1:]
SCREAMING_SNAKE_CASE = param.view(*A__ )
SCREAMING_SNAKE_CASE = param.transpose(0 , 2 )
SCREAMING_SNAKE_CASE = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
SCREAMING_SNAKE_CASE = (num_heads, num_splits, hidden_size) + input_shape[1:]
SCREAMING_SNAKE_CASE = param.view(*A__ )
SCREAMING_SNAKE_CASE = param.transpose(0 , 1 ).contiguous()
SCREAMING_SNAKE_CASE = param.view(*A__ )
return param
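# Tiny NumPy sketch of the checkpoint_version >= 2.0 branch above: the fused
# QKV tensor stored as [num_heads * num_splits * hidden_size, :] is reshaped
# and its first two axes swapped, giving [num_splits * num_heads * hidden_size, :],
# so all Q rows come first, then K, then V. The dimensions below are toy values.
import numpy as np

num_heads, num_splits, hidden = 2, 3, 4
param = np.arange(num_heads * num_splits * hidden * 5).reshape(num_heads * num_splits * hidden, 5)
out = (
    param.reshape((num_heads, num_splits, hidden) + param.shape[1:])
    .transpose(1, 0, 2, 3)
    .reshape(param.shape)
)
print(param.shape, out.shape)  # (24, 5) (24, 5) -- same storage, rows regrouped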
def __a ( A__ : Optional[Any] , A__ : Tuple , A__ : Any ):
# The converted output model.
SCREAMING_SNAKE_CASE = {}
# old versions did not store training args
SCREAMING_SNAKE_CASE = input_state_dict.get("args" , A__ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
SCREAMING_SNAKE_CASE = ds_args.padded_vocab_size
SCREAMING_SNAKE_CASE = ds_args.max_position_embeddings
SCREAMING_SNAKE_CASE = ds_args.hidden_size
SCREAMING_SNAKE_CASE = ds_args.num_layers
SCREAMING_SNAKE_CASE = ds_args.num_attention_heads
SCREAMING_SNAKE_CASE = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
SCREAMING_SNAKE_CASE = config.n_head
# The hidden_size per head.
SCREAMING_SNAKE_CASE = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
SCREAMING_SNAKE_CASE = input_state_dict["checkpoint_version"]
else:
SCREAMING_SNAKE_CASE = 0.0
# The model.
SCREAMING_SNAKE_CASE = input_state_dict["model"]
# The language model.
SCREAMING_SNAKE_CASE = model["language_model"]
# The embeddings.
SCREAMING_SNAKE_CASE = lm["embedding"]
# The word embeddings.
SCREAMING_SNAKE_CASE = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
SCREAMING_SNAKE_CASE = word_embeddings[: config.vocab_size, :]
SCREAMING_SNAKE_CASE = word_embeddings
# The position embeddings.
SCREAMING_SNAKE_CASE = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
SCREAMING_SNAKE_CASE = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
# Store the position embeddings.
SCREAMING_SNAKE_CASE = pos_embeddings
# The transformer.
SCREAMING_SNAKE_CASE = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
SCREAMING_SNAKE_CASE = re.compile(R"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
SCREAMING_SNAKE_CASE = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
SCREAMING_SNAKE_CASE = layer_re.match(A__ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
SCREAMING_SNAKE_CASE = int(m.group(1 ) )
# The name of the operation.
SCREAMING_SNAKE_CASE = m.group(2 )
# Is it a weight or a bias?
SCREAMING_SNAKE_CASE = m.group(3 )
# The name of the layer.
SCREAMING_SNAKE_CASE = F"transformer.h.{layer_idx}"
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
SCREAMING_SNAKE_CASE = "ln_1" if op_name.startswith("input" ) else "ln_2"
SCREAMING_SNAKE_CASE = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
SCREAMING_SNAKE_CASE = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , A__ , A__ )
SCREAMING_SNAKE_CASE = causal_mask
# Insert a "dummy" tensor for masked_bias.
SCREAMING_SNAKE_CASE = torch.tensor(-1E4 , dtype=torch.floataa )
SCREAMING_SNAKE_CASE = masked_bias
SCREAMING_SNAKE_CASE = fix_query_key_value_ordering(A__ , A__ , 3 , A__ , A__ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
SCREAMING_SNAKE_CASE = out_val.transpose(0 , 1 ).contiguous()
# Store.
SCREAMING_SNAKE_CASE = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
SCREAMING_SNAKE_CASE = fix_query_key_value_ordering(A__ , A__ , 3 , A__ , A__ )
# Store. No change of shape.
SCREAMING_SNAKE_CASE = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
SCREAMING_SNAKE_CASE = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
SCREAMING_SNAKE_CASE = megatron_to_transformers[op_name]
SCREAMING_SNAKE_CASE = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
SCREAMING_SNAKE_CASE = transformer["final_layernorm.weight"]
SCREAMING_SNAKE_CASE = transformer["final_layernorm.bias"]
    # For the LM head, transformers wants the matrix tied to the word embeddings.
SCREAMING_SNAKE_CASE = word_embeddings
# It should be done!
return output_state_dict
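# Quick standalone check of the layer-name regex used in the converter above:
# it splits a Megatron parameter name into (layer index, operation, weight/bias).
import re

layer_re_sketch = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
match = layer_re_sketch.match("layers.11.self_attention.query_key_value.weight")
print(match.groups())  # ('11', 'self_attention.query_key_value', 'weight')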
def __a ( ):
# Create the argument parser.
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=A__ , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=A__ , help="An optional config json file describing the pre-trained model." , )
SCREAMING_SNAKE_CASE = parser.parse_args()
# Extract the basename.
SCREAMING_SNAKE_CASE = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
SCREAMING_SNAKE_CASE = torch.load(A__ , map_location="cpu" )
else:
SCREAMING_SNAKE_CASE = torch.load(args.path_to_checkpoint , map_location="cpu" )
SCREAMING_SNAKE_CASE = input_state_dict.get("args" , A__ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
SCREAMING_SNAKE_CASE = "gelu_fast"
elif ds_args.openai_gelu:
SCREAMING_SNAKE_CASE = "gelu_new"
else:
SCREAMING_SNAKE_CASE = "gelu"
else:
# in the very early days this used to be "gelu_new"
SCREAMING_SNAKE_CASE = "gelu_new"
# Spell out all parameters in case the defaults change.
SCREAMING_SNAKE_CASE = GPTaConfig(
vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=A__ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.0_2 , summary_type="cls_index" , summary_use_proj=A__ , summary_activation=A__ , summary_proj_to_labels=A__ , summary_first_dropout=0.1 , scale_attn_weights=A__ , use_cache=A__ , bos_token_id=50256 , eos_token_id=50256 , )
else:
SCREAMING_SNAKE_CASE = GPTaConfig.from_json_file(args.config_file )
SCREAMING_SNAKE_CASE = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
SCREAMING_SNAKE_CASE = convert_megatron_checkpoint(A__ , A__ , A__ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(A__ , A__ )
# Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
SCREAMING_SNAKE_CASE = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
SCREAMING_SNAKE_CASE = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
SCREAMING_SNAKE_CASE = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
else:
SCREAMING_SNAKE_CASE = "gpt2"
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(A__ )
SCREAMING_SNAKE_CASE = type(A__ ).__name__
SCREAMING_SNAKE_CASE = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(A__ )
# Save tokenizer based on args
print(F"Adding {tokenizer_class} tokenizer files" )
tokenizer.save_pretrained(A__ )
# Store the state_dict to file.
SCREAMING_SNAKE_CASE = os.path.join(A__ , "pytorch_model.bin" )
print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
torch.save(A__ , A__ )
####################################################################################################
if __name__ == "__main__":
main()
#################################################################################################### | 698 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def _snake_case ( *__lowerCamelCase : Any , **__lowerCamelCase : Any ):
pass
@is_pipeline_test
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] )
        # The floating-point scores are so close that we are within floating-point error, so the order is not
        # guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(__lowerCamelCase ) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
] , )
@require_tf
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
[
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
{"score": 0.333, "label": ANY(__lowerCamelCase )},
],
] , )
@slow
@require_torch
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
SCREAMING_SNAKE_CASE = image_classifier(__lowerCamelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase ) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , ) | 698 | 1 |
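# Illustrative sketch: the tiny-random checkpoint above yields near-uniform
# scores (~0.333 over three labels), presumably because an untrained CLIP head
# produces almost-equal logits, and softmax over three near-equal logits is
# ~1/3 each. The logits below are toy values, not outputs from the model.
import math

logits = [0.01, 0.0, -0.01]
exps = [math.exp(l) for l in logits]
print([round(e / sum(exps), 3) for e in exps])  # [0.337, 0.333, 0.33]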
from __future__ import annotations
def __a ( A__ : int | float | str , A__ : int | float | str ):
if nth_term == "":
return [""]
SCREAMING_SNAKE_CASE = int(A__ )
SCREAMING_SNAKE_CASE = int(A__ )
SCREAMING_SNAKE_CASE = []
for temp in range(int(A__ ) ):
series.append(F"1 / {pow(temp + 1 , int(A__ ) )}" if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Dict = int(input('Enter the last number (nth term) of the P-Series'))
__A : int = int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power)) | 698 |
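# Self-contained sketch of the series the script above prints: the P-series
# 1 + 1/2^p + 1/3^p + ... rendered as strings, here for n=5 terms and p=2.
def p_series_sketch(n: int, p: int) -> list[str]:
    return ["1" if k == 1 else f"1 / {k**p}" for k in range(1, n + 1)]

print(p_series_sketch(5, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']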
__A : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355_818,
}
def __a ( A__ : str , A__ : str , A__ : float ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
SCREAMING_SNAKE_CASE = (
F"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
F"Valid values are: {', '.join(A__ )}"
)
raise ValueError(A__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 | 1 |
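# The converter above normalises through joules:
#     result = value * factor[from_type] / factor[to_type].
# A quick sanity check with the table's own factors (1 kilowatthour -> megajoule):
print(1 * 3_600_000 / 1_000_000)  # 3.6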
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Union[str, Any] = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "roc_bert"
def __init__( self : List[str] , __lowerCamelCase : Any=30522 , __lowerCamelCase : Union[str, Any]=768 , __lowerCamelCase : int=12 , __lowerCamelCase : int=12 , __lowerCamelCase : Any=3072 , __lowerCamelCase : Any="gelu" , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Dict=512 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : List[str]=0.02 , __lowerCamelCase : int=1e-12 , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : str="absolute" , __lowerCamelCase : Dict=None , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : List[str]=768 , __lowerCamelCase : Any=910 , __lowerCamelCase : Union[str, Any]=512 , __lowerCamelCase : Dict=24858 , __lowerCamelCase : Tuple=True , **__lowerCamelCase : Any , ):
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = enable_pronunciation
SCREAMING_SNAKE_CASE = enable_shape
SCREAMING_SNAKE_CASE = pronunciation_embed_dim
SCREAMING_SNAKE_CASE = pronunciation_vocab_size
SCREAMING_SNAKE_CASE = shape_embed_dim
SCREAMING_SNAKE_CASE = shape_vocab_size
SCREAMING_SNAKE_CASE = concat_input
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = classifier_dropout
super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase ) | 698 |
from collections import deque
from .hash_table import HashTable
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Optional[int] , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Optional[Any] ):
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.values[key]
def _snake_case ( self : Union[str, Any] ):
return (
sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0
):
return key
return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase ) | 698 | 1 |
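# Minimal standalone illustration of the chaining scheme above: each bucket
# holds a deque and new values are pushed on the left, so the most recent
# insertion sits at the head of its chain. Three buckets and `value % 3` as
# the hash function are arbitrary toy choices.
from collections import deque

buckets: list = [None] * 3
for value in (10, 13, 16, 5):
    key = value % 3
    if buckets[key] is None:
        buckets[key] = deque()
    buckets[key].appendleft(value)
print(buckets)  # [None, deque([16, 13, 10]), deque([5])]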
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__A : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Tuple = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 698 |
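# Simplified sketch of the lazy-import pattern above (a stand-in, not the
# actual transformers._LazyModule): a module subclass that defers the real
# import until an exported name is first accessed.
import importlib
import types

class LazyProxy(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Resolve the submodule that exports `attr`, import it, and forward.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)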
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : int = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "gpt_neo"
lowerCamelCase__ = ["past_key_values"]
lowerCamelCase__ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : str , __lowerCamelCase : Dict=50257 , __lowerCamelCase : Tuple=2048 , __lowerCamelCase : Optional[Any]=2048 , __lowerCamelCase : int=24 , __lowerCamelCase : int=[[["global", "local"], 12]] , __lowerCamelCase : int=16 , __lowerCamelCase : List[str]=None , __lowerCamelCase : List[Any]=256 , __lowerCamelCase : Tuple="gelu_new" , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=1e-5 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Dict=True , __lowerCamelCase : Dict=50256 , __lowerCamelCase : Optional[int]=50256 , **__lowerCamelCase : Dict , ):
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_layers
SCREAMING_SNAKE_CASE = num_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = window_size
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = resid_dropout
SCREAMING_SNAKE_CASE = embed_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = layer_norm_epsilon
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = attention_types
SCREAMING_SNAKE_CASE = self.expand_attention_types_params(__lowerCamelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
f"`config.num_layers = {self.num_layers}`. "
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
@staticmethod
def _snake_case ( __lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
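# Quick standalone check of the attention-type expansion above: the default
# pattern [[["global", "local"], 12]] unrolls to 24 alternating entries, one
# per layer.
pattern = [[["global", "local"], 12]]
attentions_sketch = []
for item in pattern:
    for _ in range(item[1]):
        attentions_sketch.extend(item[0])
print(len(attentions_sketch), attentions_sketch[:4])  # 24 ['global', 'local', 'global', 'local']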
def __a ( A__ : str , A__ : List[Any] , A__ : List[str] , A__ : Union[str, Any] ):
import torch
SCREAMING_SNAKE_CASE = input.size()
SCREAMING_SNAKE_CASE = len(A__ )
SCREAMING_SNAKE_CASE = shape[dimension]
SCREAMING_SNAKE_CASE = torch.arange(0 , A__ , A__ )
SCREAMING_SNAKE_CASE = torch.div(sizedim - size , A__ , rounding_mode="floor" ) + 1
SCREAMING_SNAKE_CASE = torch.arange(A__ ) + low_indices[:min_length][:, None]
SCREAMING_SNAKE_CASE = [slice(A__ )] * rank
SCREAMING_SNAKE_CASE = indices
SCREAMING_SNAKE_CASE = input[s]
SCREAMING_SNAKE_CASE = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(A__ )
def __a ( A__ : Union[str, Any] , A__ : Optional[int] ):
import torch
SCREAMING_SNAKE_CASE = torch.arange(1 , A__ )
SCREAMING_SNAKE_CASE = torch.remainder(A__ , A__ )
SCREAMING_SNAKE_CASE = remainders == 0
SCREAMING_SNAKE_CASE = candidates[divisor_indices]
SCREAMING_SNAKE_CASE = torch.max(A__ )
return largest_divisor, torch.div(A__ , A__ , rounding_mode="floor" )
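# Standalone check of the behaviour the first helper above reproduces for
# ONNX export: torch.Tensor.unfold, i.e. sliding windows of length `size`
# with stride `step` along one dimension.
import torch

x = torch.arange(6)
print(x.unfold(0, 3, 1))
# tensor([[0, 1, 2],
#         [1, 2, 3],
#         [2, 3, 4],
#         [3, 4, 5]])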
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
@property
def _snake_case ( self : Any ):
SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
SCREAMING_SNAKE_CASE = {0: "batch", 1: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _snake_case ( self : Optional[int] ):
return self._config.num_heads
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE = super(__lowerCamelCase , self ).generate_dummy_inputs(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
        # We need to order the inputs in the way they appear in the forward()
SCREAMING_SNAKE_CASE = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE = seqlen + 2
SCREAMING_SNAKE_CASE = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE = common_inputs["attention_mask"]
if self.use_past:
SCREAMING_SNAKE_CASE = ordered_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self : Optional[int] ):
return 13 | 698 | 1 |
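# Shape sketch for the dummy past_key_values generated above: one (key, value)
# pair of zeros per layer, each shaped
#     (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads).
# The dimensions below are arbitrary toy values, not real GPT-Neo sizes.
import torch

batch, num_heads, past_len, head_dim, num_layers = 2, 4, 7, 8, 3
shape = (batch, num_heads, past_len, head_dim)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
print(len(past_key_values), past_key_values[0][0].shape)  # 3 torch.Size([2, 4, 7, 8])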
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
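# `get_duration` comes from the local benchmarks `utils` module, which is not shown
# here. A minimal sketch of what such a decorator might look like (an assumption,
# not the actual implementation):
#
#   import functools, time
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           start = time.time()
#           func(*args, **kwargs)
#           return time.time() - start
#       return wrapper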


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 698 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]
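
# The `_import_structure` mapping above drives lazy loading: the heavy torch/flax
# modules are only imported the first time an attribute is accessed. Illustrative
# usage (assumes transformers is installed; the exact access path is an assumption):
#
#   from transformers.models.longt5 import LongT5Config  # triggers the real import
#   config = LongT5Config()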
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 698 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
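
# Illustrative usage (the file name is an assumption, not part of the original
# module): this reader backs `datasets.Dataset.from_json`, e.g.
#
#   ds = JsonDatasetReader("data.jsonl", split=datasets.Split.TRAIN).read()
#   # or equivalently: ds = datasets.Dataset.from_json("data.jsonl")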


class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written
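
    # Illustrative usage (the file name is an assumption): this writer backs
    # `datasets.Dataset.to_json`, e.g.
    #
    #   n_bytes = JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()
    #   # or equivalently: ds.to_json("out.jsonl", num_proc=2)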

    def _batch_json(self, args) -> bytes:
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
| 698 |
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert the phase angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular (complex) form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
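

# Worked example (illustrative): a 100 V source at 0 degrees with a 5 A current at
# 0 degrees gives a purely real apparent power of 500 VA:
#
#   apparent_power(100, 5, 0, 0)   # -> (500+0j)
#   apparent_power(100, 5, 90, 0)  # -> approximately 500j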
if __name__ == "__main__":
import doctest
doctest.testmod() | 698 | 1 |