"""simple docstring"""
from __future__ import annotations
from statistics import mean
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : list[int] ,_lowerCamelCase : int ) -> list[int]:
_lowerCAmelCase : Optional[int] = [0] * no_of_processes
_lowerCAmelCase : Optional[Any] = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = burst_time[i]
_lowerCAmelCase : list[int] = []
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : int = 0
# When processes are not completed,
# A process whose arrival time has passed \
# and has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process is executed.
while completed != no_of_processes:
_lowerCAmelCase : str = []
_lowerCAmelCase : List[str] = -1
for i in range(_lowerCamelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : Tuple = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
_lowerCAmelCase : Tuple = i
total_time += burst_time[target_process]
completed += 1
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Optional[int] = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ,_lowerCamelCase : list[int] ) -> list[int]:
_lowerCAmelCase : Tuple = [0] * no_of_processes
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
_a : Dict = 4
_a : Optional[Any] = [2, 5, 3, 7]
_a : List[str] = [0, 0, 0, 0]
_a : Optional[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_a : str = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with optional hidden-state projections (Transformer-XL style)."""

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
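# --- Usage sketch (added; not part of the original module): a quick smoke test
# of the adaptive softmax above. The vocabulary size, cutoffs, and dimensions
# are illustrative values chosen here, not anything mandated by the class.
# With labels given, forward() returns per-token negative log-likelihoods, so
# a scalar loss is simply their mean. Labels are kept inside the head bucket
# so every cluster the loop visits has more than one example.
if __name__ == "__main__":
    torch.manual_seed(0)
    crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500], div_val=1)
    hidden = torch.randn(2, 8, 64)          # (batch, seq_len, d_proj)
    labels = torch.randint(0, 100, (2, 8))  # (batch, seq_len), head bucket only
    nll = crit(hidden, labels)              # shape: (batch * (seq_len - 1),)
    print(nll.shape, nll.mean().item())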
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_1 = outputs.last_hidden_state.numpy()
                out_1[np.isnan(out_1)] = 0
            else:
                out_1 = outputs.logits.numpy()
                out_1[np.isnan(out_1)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_2 = after_outputs["last_hidden_state"].numpy()
                    out_2[np.isnan(out_2)] = 0
                else:
                    out_2 = after_outputs["logits"].numpy()
                    out_2[np.isnan(out_2)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_inputs, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
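# --- Usage sketch (added; not from the original test file). The pattern every
# test above relies on: ViTMAE samples a random patch mask on each forward
# pass, so determinism is restored by passing an explicit `noise` array of
# shape (batch, num_patches). A randomly initialised model is used here to
# avoid a download; this assumes TensorFlow is available.
def _vitmae_noise_demo():
    config = ViTMAEConfig()
    model = TFViTMAEModel(config)
    num_patches = int((config.image_size // config.patch_size) ** 2)
    noise = np.random.uniform(size=(1, num_patches))
    pixel_values = tf.random.uniform((1, config.num_channels, config.image_size, config.image_size))
    # Same noise in -> same mask -> same outputs across calls.
    return model(pixel_values, noise=noise)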
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
"""Notebook-conversion settings for the Korean translation of the docs."""

INSTALL_CONTENT = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment out the command above and uncomment the one below.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
"""Testing suite for the PyTorch SegFormer model."""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
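# --- Usage sketch (added; not part of the original tests): outside a test
# harness, the same post-processing turns raw logits into a per-pixel label
# map at the original image resolution. PIL's `image.size` is (width, height),
# hence the reversal for target_sizes. Assumes the checkpoint used above.
def _segment(image):
    image_processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    return image_processor.post_process_semantic_segmentation(
        outputs, target_sizes=[image.size[::-1]]
    )[0]  # LongTensor of shape (height, width) with one class id per pixel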
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BPE tokenizer for HerBERT, backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
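# --- Illustration (added; toy ids, not from the original file): for a pair of
# sequences the special-token layout produced above is <s> A </s> B </s>, with
# token_type_ids 0 over the first segment and 1 over the second. With
# token_ids_0 = [5, 6] and token_ids_1 = [7]:
#   build_inputs_with_special_tokens   -> [cls_id, 5, 6, sep_id, 7, sep_id]
#   create_token_type_ids_from_sequences -> [0, 0, 0, 0, 1, 1]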
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer.
    >>> roman_to_int("XIV")
    14
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one means subtraction (e.g. IV = 4).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral.
    >>> int_to_roman(14)
    'XIV'
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
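# --- Quick check (added): the two converters are inverses of each other over
# the standard range of Roman numerals.
if __name__ == "__main__":
    assert all(roman_to_int(int_to_roman(n)) == n for n in range(1, 4000))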
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
__A = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__A = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Any:
lowercase__: Dict = []
for i in range(len(__UpperCAmelCase ) ):
lowercase__: Optional[Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowercase__: Union[str, Any] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__UpperCAmelCase ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__UpperCAmelCase ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__UpperCAmelCase ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowercase__: List[str] = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__UpperCAmelCase )
return next_generation
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
lowercase__: int = []
for _ in range(__UpperCAmelCase ):
# Create output image
lowercase__: Dict = Image.new('''RGB''' , (len(cells[0] ), len(__UpperCAmelCase )) )
lowercase__: List[str] = img.load()
# Save cells to image
for x in range(len(__UpperCAmelCase ) ):
for y in range(len(cells[0] ) ):
lowercase__: List[Any] = 2_5_5 - cells[y][x] * 2_5_5
lowercase__: Dict = (colour, colour, colour)
# Save image
images.append(__UpperCAmelCase )
lowercase__: List[str] = new_generation(__UpperCAmelCase )
return images
if __name__ == "__main__":
__A = generate_images(GLIDER, 1_6)
images[0].save("out.gif", save_all=True, append_images=images[1:])
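# --- Quick check (added): a blinker oscillates with period 2, so applying
# new_generation twice returns the original pattern.
if __name__ == "__main__":
    assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
    assert new_generation(new_generation(BLINKER)) == BLINKER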
"""The SuperGLUE benchmark metric."""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__snake_case ="""\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__snake_case ="""\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__snake_case ="""
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='numpy' if not self.config_name == 'record' and not self.config_name == 'multirc' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"prediction_text": datasets.Value('string' ),
},
"references": {
"idx": {
"passage": datasets.Value('int64' ),
"query": datasets.Value('int64' ),
},
"answers": datasets.Sequence(datasets.Value('string' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('int64' ),
"paragraph": datasets.Value('int64' ),
"question": datasets.Value('int64' ),
},
"prediction": datasets.Value('int64' ),
},
"references": datasets.Value('int64' ),
}
else:
return {
"predictions": datasets.Value('int64' ),
"references": datasets.Value('int64' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_fa(predictions, references, fa_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 4 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1_000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, action and return, over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
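
# Note on the rollout loop above: Decision Transformer conditions on returns-to-go,
# so after each (dummy) environment step the realized reward is subtracted from the
# previous target return before being appended; the hard-coded tuple stands in for
# a real Gym env.step(action) call.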
| 369 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
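
# Example (assuming "tokenizers" is pinned in dependency_versions_table.py):
#   dep_version_check("tokenizers")
# raises if the installed tokenizers version violates the pinned requirement.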
| 35 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])

CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
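
    # Decoding strategy, in short: passages are visited in decreasing order of
    # relevance_logits, candidate spans inside each passage are scored as
    # start_logit + end_logit, and overlapping spans are filtered out by
    # _get_best_spans below before the global top `num_spans` are returned.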
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 134 |
def solution(limit: int = 1_000_000) -> int:
    """Return the start value below `limit` whose Collatz sequence is longest."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
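
# Chain lengths are memoized in `counters`, so once a sequence reaches a number
# that was already resolved, the remaining length is reused instead of recomputed.
# For example the chain 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 has
# length 10, and every later chain passing through 13 reuses that count.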
if __name__ == "__main__":
print(solution(int(input().strip())))
| 48 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
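
# With this lazy pattern, importing the package stays cheap: the torch-backed
# modeling submodule is only imported when one of the names declared in
# _import_structure is first accessed on the module object.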
| 324 |
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # aim each frontier at the node just expanded by the other one
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
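
# The two frontiers are expanded in lock-step: the search stops as soon as the node
# popped from the forward queue occupies the same cell as the node popped from the
# backward queue, and the two partial paths are stitched together above (with the
# duplicated meeting cell removed from the backward half).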
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 324 | 1 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
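
    # Example invocation (paths are hypothetical):
    #   python convert_t5_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path /tmp/t5/model.ckpt \
    #       --config_file /tmp/t5/config.json \
    #       --pytorch_dump_path /tmp/t5-pytorch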
| 0 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Given any two of the electron, hole and intrinsic concentrations (pass the
    unknown one as 0), compute the third via the mass-action law n * p = n_i**2.
    """
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
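
# A quick sanity check of the mass-action law: with electron_conc=25 and
# hole_conc=100, carrier_concentration(25, 100, 0) returns
# ('intrinsic_conc', 50.0), since sqrt(25 * 100) = 50.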
if __name__ == "__main__":
import doctest
doctest.testmod()
| 188 | 0 |
class MaxFenwickTree:
    """Fenwick-tree variant supporting point updates and range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # recompute the maximum over the range this tree node covers
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)
    def query(self, left: int, right: int) -> int:
        """Maximum of arr[left:right] (right is exclusive)."""
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
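
# Minimal usage sketch (0-indexed; query's right bound is exclusive):
#   tree = MaxFenwickTree(5)
#   tree.update(2, 7)
#   tree.update(4, 3)
#   tree.query(0, 5)  # -> 7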
if __name__ == "__main__":
import doctest
doctest.testmod()
| 103 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""")
    return _value
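
# e.g. running the suite as `RUN_SLOW=yes python -m pytest tests/` flips
# _run_slow_tests below and un-skips every test decorated with @slow.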
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def skip(test_case):
    return unittest.skip('Test was skipped')(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, 'test is slow')(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), 'test requires only a CPU')(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), 'test requires a GPU')(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), 'test requires a XPU')(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), 'test requires a `mps` backend support in `torch`')(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), 'test requires the Hugging Face suite'
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), 'test requires the bitsandbytes library')(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), 'test requires TPU')(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, 'test requires a GPU')(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, 'test requires a XPU')(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, 'test requires multiple GPUs')(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, 'test requires multiple XPUs')(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), 'test requires safetensors')(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), 'test requires DeepSpeed')(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version('>=', '1.12.0'), 'test requires torch version >= 1.12.0')(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('>=', version), F"""test requires torch version >= {version}""")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), 'test requires Tensorboard')(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), 'test requires wandb')(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), 'test requires comet_ml')(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        'test requires at least one tracker to be available and for `comet_ml` to not be installed',
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        # Create a temporary directory shared by all tests in the class.
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        # Wipe the contents of `self.tmpdir` (but not the directory itself).
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def setUp(self, mocks=None):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:'))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=True, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}"""
        )

    return result
class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, 'decode'):
                output = output.decode('utf-8')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"""Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}"""
        ) from e
| 103 | 1 |
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、世界。'
        output_text = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。')
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='mecab')
        self.assertIsNotNone(tokenizer)

        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic='ipadic')

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic_lite')
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],
        )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic='unidic')
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic='ipadic')

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option='-d /usr/local/lib/mecab/dic/jumandic'
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic='ipadic')

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '　', '。'],
        )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='sudachi')
        self.assertIsNotNone(tokenizer)

        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core')

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '],
        )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='A')

        self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国', '人', '参政', '権'])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='B')

        self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国人', '参政権'])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type='core', sudachi_split_mode='C')

        self.assertListEqual(tokenizer.tokenize('外国人参政権'), ['外国人参政権'])
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type='core')

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type='core')

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type='core')

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'],
        )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type='jumanpp')
        self.assertIsNotNone(tokenizer)

        text = 'こんにちは、世界。\nこんばんは、世界。'
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
        with open(filename, 'wb') as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, 'rb') as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 '),
            ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。'),
            ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')

        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('こんにちは'), ['こんにちは'])
        self.assertListEqual(tokenizer.tokenize('こんばんは'), ['こん', '##ばんは'])
        self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは'), ['こん', '##ばんは', '[UNK]', 'こんにちは'])
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp')
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。')
        self.assertListEqual(tokens, ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'])

        tokens = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは')
        self.assertListEqual(tokens, ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese')

        text = tokenizer.encode('ありがとう。', add_special_tokens=False)
        text_2 = tokenizer.encode('どういたしまして。', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(_lowerCAmelCase, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."
            )
        )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."
            )
        )
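
# A minimal sketch (not part of the test classes above) of the special-token
# layout the `test_sequence_builders` checks verify, assuming id 2 is "[CLS]"
# and id 3 is "[SEP]" as stated in the comments above. `build_inputs_sketch`
# is a hypothetical helper; `text`/`text_2` are plain lists of token ids.
def build_inputs_sketch(text, text_2=None):
    cls_, sep = [2], [3]
    if text_2 is None:
        return cls_ + text + sep
    return cls_ + text + sep + text_2 + sep

assert build_inputs_sketch([7, 8]) == [2, 7, 8, 3]
assert build_inputs_sketch([7, 8], [9]) == [2, 7, 8, 3, 9, 3]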
| 266 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # ctypes.Structure only picks up the field layout when it is named `_fields_`
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    """Hide the terminal cursor (Windows console API or ANSI escape code)."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    """Show the terminal cursor again."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hidden_cursor():
    """Context manager that hides the cursor and always restores it."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
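
# Example usage (a minimal sketch): hide the cursor while drawing a simple
# progress line; the `finally` clause in `hidden_cursor` restores the cursor
# even if the loop raises.
if __name__ == "__main__":
    import time

    with hidden_cursor():
        for i in range(5):
            sys.stdout.write(f"\rworking... {i + 1}/5")
            sys.stdout.flush()
            time.sleep(0.1)
    sys.stdout.write("\ndone\n")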
| 266 | 1 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # project the timestep embedding and broadcast it over the spatial dims
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
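
# Quick shape check (a sketch): nearest-neighbour upsampling doubles the
# spatial dims before the 3x3 convolution, so an 8x8 NHWC input comes out 16x16.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    x = jnp.zeros((1, 8, 8, 4))  # flax.linen.Conv expects NHWC
    up = FlaxUpsample2D(out_channels=4)
    params = up.init(rng, x)
    print(up.apply(params, x).shape)  # (1, 16, 16, 4)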
| 254 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def ta_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")
    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        # collect ids whose decoded form is plain ASCII text and round-trips through `encode`
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.ta_base_tokenizer
lowerCAmelCase__ :Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowerCAmelCase__ :Dict = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0]
# fmt: on
lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
if FRAMEWORK != "jax":
lowerCAmelCase__ :Dict = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase__ :Dict = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 3_7) , batch.input_ids.shape )
self.assertEqual((2, 3_7) , batch.attention_mask.shape )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = self.ta_base_tokenizer
lowerCAmelCase__ :Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , __UpperCAmelCase )
self.assertIn('attention_mask' , __UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , __UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = self.ta_base_tokenizer
lowerCAmelCase__ :Tuple = [
'Summary of the text.',
'Another summary.',
]
lowerCAmelCase__ :Union[str, Any] = tokenizer(
text_target=__UpperCAmelCase , max_length=3_2 , padding='max_length' , truncation=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.ta_base_tokenizer
lowerCAmelCase__ :int = ['A long paragraph for summarization. </s>']
lowerCAmelCase__ :Tuple = ['Summary of the text. </s>']
# fmt: off
lowerCAmelCase__ :Union[str, Any] = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1]
lowerCAmelCase__ :Dict = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1]
# fmt: on
lowerCAmelCase__ :List[Any] = tokenizer(__UpperCAmelCase , text_target=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , batch['input_ids'][0] )
self.assertEqual(__UpperCAmelCase , batch['labels'][0] )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
lowerCAmelCase__ :List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ :int = tempfile.mkdtemp()
lowerCAmelCase__ :Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
lowerCAmelCase__ :str = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
tokenizer.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :str = tokenizer.__class__.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = after_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
shutil.rmtree(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ :Any = tempfile.mkdtemp()
lowerCAmelCase__ :Any = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowerCAmelCase__ :Dict = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowerCAmelCase__ :Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
tokenizer.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = tokenizer.__class__.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Any = after_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
lowerCAmelCase__ :Any = tokenizer.__class__.from_pretrained(__UpperCAmelCase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
lowerCAmelCase__ :List[str] = json.load(__UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
lowerCAmelCase__ :Tuple = json.load(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = [F"<extra_id_{i}>" for i in range(1_2_5 )]
lowerCAmelCase__ :List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowerCAmelCase__ :Union[str, Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(__UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase__ :Tuple = tokenizer_class.from_pretrained(
__UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase__ :Optional[int] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=__UpperCAmelCase )]
lowerCAmelCase__ :List[str] = tokenizer_class.from_pretrained(
__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = tokenizer_class.from_pretrained(__UpperCAmelCase )
self.assertTrue(tokenizer.decode([2_5_5] ) == '' )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = self.get_tokenizers(fast=__UpperCAmelCase , do_lower_case=__UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
lowerCAmelCase__ :List[Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
lowerCAmelCase__ :List[Any] = tokenizer.convert_tokens_to_string(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
lowerCAmelCase__ :Optional[int] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
lowerCAmelCase__ :str = 0
lowerCAmelCase__ :Dict = tokenizer.convert_ids_to_tokens(
__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
for attr in attributes_list:
setattr(__UpperCAmelCase , attr + '_id' , __UpperCAmelCase )
self.assertEqual(getattr(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(getattr(__UpperCAmelCase , attr + '_id' ) , __UpperCAmelCase )
setattr(__UpperCAmelCase , attr + '_id' , __UpperCAmelCase )
self.assertEqual(getattr(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(getattr(__UpperCAmelCase , attr + '_id' ) , __UpperCAmelCase )
setattr(__UpperCAmelCase , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(__UpperCAmelCase , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(__UpperCAmelCase , 'additional_special_tokens_ids' ) , [] )
setattr(__UpperCAmelCase , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(__UpperCAmelCase , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(__UpperCAmelCase , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
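
# The hard-coded ids in the tests above follow ByT5's byte-level scheme: each
# UTF-8 byte is offset by 3 to make room for pad (0), </s> (1) and unk (2).
# A quick standalone check against the "Unicode €." case used above:
byte_ids = [b + 3 for b in "Unicode €.".encode("utf-8")] + [1]  # trailing </s>
assert byte_ids == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]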
| 254 | 1 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False,
            bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
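
# A condensed, runnable sketch (hypothetical tiny sizes, random weights) of the
# cached incremental-decoding pattern the two cache checks above verify: run
# the prompt once to fill the key/value cache, then feed one token at a time.
if __name__ == "__main__" and is_flax_available():
    tiny_config = GPTJConfig(vocab_size=99, n_embd=32, n_layer=2, n_head=4, n_positions=64, rotary_dim=4)
    tiny_model = FlaxGPTJForCausalLM(tiny_config)
    ids = jnp.ones((1, 8), dtype="i4")
    mask = jnp.ones((1, 8), dtype="i4")
    cache = tiny_model.init_cache(1, 8)
    prefill = tiny_model(
        ids[:, :-1], attention_mask=mask, past_key_values=cache, position_ids=jnp.arange(7)[None, :]
    )
    step = tiny_model(
        ids[:, -1:],
        attention_mask=mask,
        past_key_values=prefill.past_key_values,
        position_ids=jnp.array([[7]], dtype="i4"),
    )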
| 18 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
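
# The two integration tests above pin down only a 3x3 corner of the output
# tensor. A small sketch of that pattern as a reusable helper (hypothetical;
# `output` is any (batch, seq, dim) TF tensor, `expected_slice` the hard-coded
# corner, and a loose `atol` absorbs hardware-dependent numerical noise):
def assert_corner_close(test_case, output, expected_slice, atol=1e-2):
    test_case.assertTrue(
        numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=atol)
    )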
| 345 | '''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that can map one placeholder token to several learned tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
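
# Example usage (a sketch; the checkpoint name and placeholder are
# illustrative): register a placeholder that expands to four learned tokens,
# then tokenize a prompt.
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>")["input_ids"]
#
# Internally "<cat-toy>" is rewritten to "<cat-toy>_0 ... <cat-toy>_3" before
# encoding, so a single concept can span several embedding vectors.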
| 345 | 1 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
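
# Typical downstream use (a sketch): tests load the uploaded checkpoint by the
# name given in the header comment and run a fast forward pass, e.g.:
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
#   tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")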
| 324 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(
        self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(
        self, text_config, vision_config, input_ids, attention_mask, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)
        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)
        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained(self):
        model, inputs = self.get_pretrained_model_and_inputs()
        outputs = model(**inputs)
        out_before = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model(**inputs)
            out_after = after_outputs[0]
            # the original compared a tensor with itself, which is always zero
            max_diff = np.amax(np.abs(out_after - out_before))
            self.assertLessEqual(max_diff, 1e-5)
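# Illustrative sketch (not part of the original test file): the attention-shape
# check above relies on the ViT sequence length being num_patches + 1 for the
# [CLS] token. The helper below only demonstrates that arithmetic; the example
# sizes in the docstring are assumptions, not values taken from this file.
def _expected_vit_seq_len(image_size: int, patch_size: int) -> int:
    """Return the ViT sequence length for a square image, e.g. 224/16 -> 197."""
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token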
@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True, )
        batch_size = 13
        pixel_values = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True, )
        batch_size = 13
        pixel_values = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np")
        outputs = model(**inputs)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 324 | 1 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on the diagonals of an n x n spiral (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # the four corner numbers of ring i sum to 4 * odd**2 - 6 * even
        total = total + 4 * odd**2 - 6 * even
    return total
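# Quick sanity check (illustrative, taken from the Project Euler 28 statement):
# the diagonals of a 5 x 5 clockwise spiral are 1, 3, 5, 7, 9, 13, 17, 21, 25,
# which sum to 101, so solution(5) must return 101.
assert solution(5) == 101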
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
lowerCamelCase : Any = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 208 |
def combination_util(arr, n, r, index, data, i):
    """Recursively fill data[] with combinations of size r and print each one."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print all combinations of size r from the first n elements of arr."""
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
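# Minimal worked example (illustrative, not from the original file): with
# arr = [1, 2, 3] and r = 2 the recursion above prints the three 2-element
# combinations, one per line: "1 2", "1 3", "2 3".
def _demo_combinations() -> None:
    print_combination([1, 2, 3], 3, 2)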
if __name__ == "__main__":
# Driver code to check the function above
lowerCamelCase : int = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 208 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 103 |
from datetime import datetime as dt
import os
from github import Github
A__ : List[str] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda comment: comment.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
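# Hedged sketch (not part of the original bot): the two branches above encode
# "close after more than 7 idle days once the bot has already nudged, otherwise
# nudge after more than 23 idle days", both only for issues at least 30 days
# old. This standalone predicate mirrors that policy on plain numbers so the
# thresholds are easy to test in isolation.
def _staleness_action(days_idle: int, days_old: int, bot_nudged: bool) -> str:
    if days_old < 30:
        return "keep"
    if bot_nudged and days_idle > 7:
        return "close"
    if days_idle > 23:
        return "nudge"
    return "keep"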
if __name__ == "__main__":
main()
| 103 | 1 |
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity I = I0 * cos^2(angle) given by Malus's law."""
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(_a ) ) ** 2)
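# Worked example (illustrative): at a 45 degree angle cos^2(45) = 0.5, so an
# initial intensity of 100.0 is halved.
assert abs(malus_law(100.0, 45.0) - 50.0) < 1e-9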
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 59 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """Rename the LM-head key in a DialoGPT checkpoint and save it in HF format."""
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 59 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10
    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the input is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the input is already the right length."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if the input is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)
    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])
    def test_process_empty_story(self):
        """An empty story returns empty lists for both the story and the summary."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])
    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)
    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())
    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())
    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())
    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
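# Hedged reference sketch (not from the original tests): the last test above
# asserts that compute_token_type_ids flips the segment id at every separator
# token, with the separator itself starting the new segment. A minimal
# pure-Python version of that rule, useful as an executable specification:
def _reference_token_type_ids(sequence: list, separator_token_id: int) -> list:
    segment, out = 1, []
    for token in sequence:
        if token == separator_token_id:
            segment = 1 - segment  # the separator begins the next segment
        out.append(segment)
    return out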
| 254 |
'''simple docstring'''
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check if the first digits of the number match a known issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    """Check the given credit card number against the Luhn algorithm."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling produces a two-digit number (e.g. 6 x 2 = 12),
        # add its digits (12 -> 1 + 2 = 3) to get a single digit again.
        # Doubled digits are at most 18, so "mod 10, plus 1" does exactly that.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining (untouched) digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print a validation message and return whether the number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
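# Worked example (illustrative): for "4111111111111111" doubling every second
# digit from the right gives 8 + 7 * 2 = 22 from the doubled positions and 8
# from the untouched ones, a Luhn total of 30, which is divisible by 10.
# Changing the last digit to 2 raises the total to 31 and breaks the check.
assert luhn_validation("4111111111111111")
assert not luhn_validation("4111111111111112")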
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
| 254 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
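# Illustrative check (not from the original script): with sys.argv[1:] equal to
# ["utils", "src"], joined_dirs becomes "utils|src" and the regex keeps .py
# paths under those directories while dropping everything else.
assert re.compile(r"^(utils|src).*?\.py$").match("src/foo/bar.py")
assert not re.compile(r"^(utils|src).*?\.py$").match("docs/readme.md")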
| 353 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
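# Hedged sketch (not part of the conversion script): the loop above rewrites a
# fairseq name such as "encoder.layers.3.self_attn.k_proj.weight" into the HF
# name by substituting the layer index for the "*" in the mapped key. A minimal
# standalone illustration of that substitution using the MAPPING table above:
def _demo_mapped_name(fairseq_key: str = "self_attn.k_proj", layer: str = "3") -> str:
    mapped = MAPPING[fairseq_key]  # "encoder.layers.*.attention.k_proj"
    return "unispeech_sat." + mapped.replace("*", layer)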
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 216 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
'''simple docstring'''
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 345 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__(self, vocab_size: int = 250002, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 514, initializer_range: float = 0.02, pad_token_id: int = 1, layer_norm_eps: float = 1e-05, classifier_dropout=None, is_decoder=False, act_dropout=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
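# Illustrative usage (a sketch, not from the original file): the defaults above
# mirror a base-sized encoder; the "large"-style sizes below are an assumption
# for demonstration only.
def _demo_ernie_m_config() -> ErnieMConfig:
    return ErnieMConfig(num_hidden_layers=24, hidden_size=1024)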
| 345 | 1 |
def reverse_words(input_str: str) -> str:
    """
    Reverse the order of words in a sentence.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]], ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still free: claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
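# Illustrative usage (a sketch; the test vectors are the module's own sample
# data above): running the safety check prints an execution order for the
# processes when the system is in a safe state.
def _demo_bankers_algorithm() -> None:
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)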
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 1 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass( self : Optional[Any] ) -> List[str]:
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self : Dict ) -> Tuple:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical( self : str ) -> int:
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
    def test_save_pretrained_raise_not_implemented_exception( self : List[Any] ) -> str:
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self : List[str] ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_canny( self : Optional[Any] ) -> Union[str, Any]:
        controlnet = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        prompt = 'evil space-punk bird'
        control_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
        image = load_image(
            'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type='np' , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
        assert np.abs(expected_image - image ).max() < 9e-2
| 208 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( image ) -> torch.Tensor:
    w ,h = image.size
    w ,h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h) ,resample=PIL_INTERPOLATION['lanczos'] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 ,3 ,1 ,2 )
    image = torch.from_numpy(image )
return 2.0 * image - 1.0
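# Usage sketch (added for illustration; "low_res.png" is a placeholder path): `preprocess`
# maps a PIL image to a float tensor in [-1, 1] of shape (1, 3, H, W), with H and W rounded
# down to multiples of 32 so the VQVAE/UNet downsampling stages divide evenly:
#   tensor = preprocess(PIL.Image.open("low_res.png"))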
class lowerCamelCase_ ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self : str , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ) -> Optional[Any]:
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self : List[Any] , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height ,width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
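# Minimal usage sketch (added; assumes a compatible public checkpoint such as
# "CompVis/ldm-super-resolution-4x-openimages" — adjust the id and input image to taste):
#   pipeline = lowerCamelCase_.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipeline(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]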
| 208 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self : int) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken('<mask>' , lstrip=True , rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
tokenizer.add_tokens(['<ctc_blank>'])
tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts( self : Optional[Any] , tokenizer : Tuple) -> List[Any]:
        """simple docstring"""
        input_text = 'this is a test'
        output_text = 'this is a test'
return input_text, output_text
    def get_clean_sequence( self : Tuple , tokenizer : Any , with_prefix_space : List[str]=False , max_length : Union[str, Any]=20 , min_length : List[Any]=5) -> List[str]:
        """simple docstring"""
        input_text , output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text , add_special_tokens=False)
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False)
return text, ids
    def test_convert_token_and_id( self : int) -> List[str]:
        """simple docstring"""
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab( self : List[str]) -> List[Any]:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-4] , 'œ')
self.assertEqual(vocab_keys[-2] , '<mask>')
self.assertEqual(vocab_keys[-1] , '<ctc_blank>')
        self.assertEqual(len(vocab_keys) , 81)
    def test_vocab_size( self : List[str]) -> int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 79)
    def test_add_tokens_tokenizer( self : Tuple) -> Any:
        """simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size , 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_a = tokenizer.vocab_size
                all_size_a = len(tokenizer)
                self.assertNotEqual(vocab_size_a , 0)
                self.assertEqual(vocab_size , vocab_size_a)
                self.assertEqual(added_toks , len(new_toks))
                self.assertEqual(all_size_a , all_size + len(new_toks))
                tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=False)
                self.assertGreaterEqual(len(tokens) , 4)
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
                new_toks_a = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
                added_toks_a = tokenizer.add_special_tokens(new_toks_a)
                vocab_size_b = tokenizer.vocab_size
                all_size_b = len(tokenizer)
                self.assertNotEqual(vocab_size_b , 0)
                self.assertEqual(vocab_size , vocab_size_b)
                self.assertEqual(added_toks_a , len(new_toks_a))
                self.assertEqual(all_size_b , all_size_a + len(new_toks_a))
                tokens = tokenizer.encode(
                    '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=False)
                self.assertGreaterEqual(len(tokens) , 6)
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0] , tokens[1])
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3] , tokens[-4])
                self.assertEqual(tokens[0] , tokenizer.eos_token_id)
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer( self : Dict) -> str:
        """simple docstring"""
        pass
    def test_subword_regularization_tokenizer( self : str) -> Optional[int]:
        """simple docstring"""
        pass
    def test_full_tokenizer( self : Union[str, Any]) -> Optional[Any]:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test')
        # fmt: off
        self.assertListEqual(tokens , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
    def test_tokenizer_integration( self : Union[str, Any]) -> str:
"""simple docstring"""
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=sequences , )
| 290 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __lowerCAmelCase ( PretrainedConfig ):
    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self : Any , state_dim : Optional[int]=17 , act_dim : List[str]=4 , hidden_size : int=1_28 , max_ep_len : Union[str, Any]=40_96 , action_tanh : Any=True , vocab_size : Any=1 , n_positions : List[Any]=10_24 , n_layer : List[Any]=3 , n_head : Tuple=1 , n_inner : Any=None , activation_function : Optional[int]="relu" , resid_pdrop : Union[str, Any]=0.1 , embd_pdrop : Optional[int]=0.1 , attn_pdrop : Optional[int]=0.1 , layer_norm_epsilon : Optional[int]=1E-5 , initializer_range : List[Any]=0.0_2 , scale_attn_weights : Tuple=True , use_cache : Union[str, Any]=True , bos_token_id : str=5_02_56 , eos_token_id : Union[str, Any]=5_02_56 , scale_attn_by_inverse_layer_idx : List[Any]=False , reorder_and_upcast_attn : Optional[int]=False , **kwargs : int , ) -> Union[str, Any]:
        """simple docstring"""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
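# Illustrative usage (added; not part of the original module): the attribute_map lets
# GPT-2 style names resolve transparently, e.g.
#   config = __lowerCAmelCase(state_dim=17, act_dim=4)
#   assert config.num_hidden_layers == config.n_layer == 3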
| 290 | 1 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
    def __init__(self : List[str] , device : str = "cpu" , clip_model : str = "openai/clip-vit-large-patch14" ) -> None:
        '''simple docstring'''
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        self.resize = torchvision.transforms.Resize(2_24 )
        self.center_crop = torchvision.transforms.CenterCrop(2_24 )
    def preprocess_img (self : Optional[int] , images : Union[str, Any] ) -> Optional[Any]:
        '''simple docstring'''
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images
    def __call__(self : Optional[int] , text : Optional[int]=None , images : List[Any]=None , **kwargs : int ) -> List[Any]:
        '''simple docstring'''
        encoding = self.tokenizer(text=text , **kwargs )
        encoding['pixel_values'] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
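# Note (added): the stock CLIP processors round-trip through numpy/PIL and break autograd,
# so this class redoes the preprocessing with torchvision ops only; gradients from the CLIP
# similarity loss can then flow back into the VQGAN latent being optimized below.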
class UpperCAmelCase ( nn.Module ):
    def __init__(self : int , iterations : Tuple=10 , lr : List[str]=0.01 , vqgan : Dict=None , vqgan_config : List[str]=None , vqgan_checkpoint : Dict=None , clip : Optional[Any]=None , clip_preprocessor : Union[str, Any]=None , device : Dict=None , log : Any=False , save_vector : Optional[Any]=True , return_val : Any="image" , quantize : str=True , save_intermediate : Any=False , show_intermediate : List[str]=False , make_grid : Optional[Any]=False , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation (self : Optional[int] , input_path : Dict=None , output_path : Tuple=None , total_duration : Any=5 , extend_frames : Optional[Any]=True ) -> Any:
        '''simple docstring'''
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*" ) )
        if not len(paths ):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)" )
        if len(paths ) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png" ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path , images , duration=durations )
print(f"""gif saved to {output_path}""" )
    def _get_latent (self : str , path : str=None , img : List[Any]=None ) -> Union[str, Any]:
        '''simple docstring'''
        if not (path or img):
            raise ValueError("Input either path or tensor" )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ) , target_image_size=2_56 ).to(self.device )
        img = preprocess_vqgan(img )
        z , *_ = self.vqgan.encode(img )
        return z
    def _add_vector (self : List[str] , transform_vector : Tuple ) -> Optional[Any]:
        '''simple docstring'''
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q , *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity (self : Dict , prompts : str , image : Optional[int] , weights : Optional[int]=None ) -> str:
        '''simple docstring'''
        clip_inputs = self.clip_preprocessor(text=prompts , images=image , return_tensors="pt" , padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss (self : Union[str, Any] , pos_prompts : Optional[Any] , neg_prompts : Optional[Any] , image : Dict ) -> Optional[Any]:
        '''simple docstring'''
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"] , image , weights=(1 / pos_prompts["weights"]) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"] , image , weights=neg_prompts["weights"] )
        else:
            neg_logits = torch.tensor([1] , device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP (self : Optional[Any] , original_img : Union[str, Any] , pos_prompts : str , neg_prompts : Optional[Any] ) -> str:
        '''simple docstring'''
        vector = torch.randn_like(self.latent , requires_grad=True , device=self.device )
        optim = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts , neg_prompts , processed_img )
            print("CLIP loss" , clip_loss )
            if self.log:
                wandb.log({"CLIP Loss": clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging (self : Optional[int] , positive_prompts : str , negative_prompts : Union[str, Any] , image_path : Optional[int] ) -> List[Any]:
        '''simple docstring'''
        wandb.init(reinit=True , project="face-editor" )
        wandb.config.update({"Positive Prompts": positive_prompts} )
        wandb.config.update({"Negative Prompts": negative_prompts} )
        wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((2_56, 2_56) )
            wandb.log("Original Image" , wandb.Image(image ) )
    def process_prompts (self : Dict , prompts : Union[str, Any] ) -> Optional[int]:
        '''simple docstring'''
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split("|" )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt , weight = prompt.split(":" )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device ),
}
    def generate (self : Dict , pos_prompts : Dict , neg_prompts : Union[str, Any]=None , image_path : str=None , show_intermediate : Union[str, Any]=True , save_intermediate : Any=False , show_final : List[Any]=True , save_final : List[Any]=True , save_path : Dict=None , ) -> Any:
        '''simple docstring'''
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(pos_prompts , neg_prompts , image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print("Original Image" )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
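# Minimal usage sketch (added; config/checkpoint paths and the prompt are placeholders):
#   editor = UpperCAmelCase(vqgan_config="./vqgan.yaml", vqgan_checkpoint="./vqgan.ckpt", log=False)
#   for frame in editor.generate("a smiling face: 1.0", image_path="./portrait.png"):
#       pass  # each frame is a PIL image when return_val == "image"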
| 59 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput ( BaseOutput ):
    sample: jnp.ndarray
@flax_register_to_config
class UpperCAmelCase ( nn.Module ,FlaxModelMixin ,ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (3_20, 6_40, 12_80, 12_80)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 12_80
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights (self : Optional[int] , rng : jax.random.KeyArray ) -> FrozenDict:
        '''simple docstring'''
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
    def setup (self : str ) -> Tuple:
        '''simple docstring'''
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(reversed_block_out_channels ) - 1 )]
            is_final_block = i == len(reversed_block_out_channels ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__(self : Dict , sample : Dict , timesteps : Dict , encoder_hidden_states : Optional[int] , down_block_additional_residuals : Tuple=None , mid_block_additional_residual : Union[str, Any]=None , return_dict : bool = True , train : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
        '''simple docstring'''
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlockaD ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )
        # 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample )
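# Shape sketch (added for orientation; the text-sequence length 77 is just an example):
# with the defaults above, a sample of shape (1, 4, 32, 32), a scalar timestep, and
# encoder hidden states of shape (1, 77, 1280) yield an output sample of (1, 4, 32, 32).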
| 59 | 1 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def lowercase ( _SCREAMING_SNAKE_CASE : TreeNode | None ):
'''simple docstring'''
    def is_valid_tree(node : TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
    def is_binary_search_tree_recursive_check(
        node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
return is_binary_search_tree_recursive_check(_SCREAMING_SNAKE_CASE , -float('''inf''' ) , float('''inf''' ) )
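# Illustrative checks (added; TreeNode(data, left, right)):
#   lowercase(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))  # True: strict BST ordering holds
#   lowercase(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0)))  # False: children on the wrong sides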
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( TokenizerTesterMixin , unittest.TestCase):
    """simple docstring"""
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self : Dict )->str:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self : str , **kwargs : Union[str, Any] )->Any:
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self : Union[str, Any] , tokenizer : Optional[int] )->Tuple:
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text
    def test_full_tokenizer( self : Dict )->Optional[int]:
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 326 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_funnel_fast"""] = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_funnel"""] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_funnel"""] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 289 |
def naive_pattern_search ( s : str , pattern : str ):
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
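# Note (added): the scan tries every alignment, so worst-case cost is O(len(s) * len(pattern));
# the call above should print [4, 10, 18], the start indices of each 'ABC' occurrence.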
| 216 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -1_1, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__( self : Optional[Any], ints : Iterable[int] ) -> None:
        self.head : Node | None = None
        for i in sorted(ints, reverse=True ):
            self.head = Node(i, self.head )
def __iter__( self : int ) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node
def __len__( self : str ) -> int:
return sum(1 for _ in self )
def __str__( self : Optional[Any] ) -> str:
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def merge_lists ( sll_one , sll_two ) -> SortedLinkedList:
    '''simple docstring'''
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
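# Note (added): SortedLinkedList sorts its input on construction, so merge_lists works by
# concatenating both inputs and re-sorting — O((m + n) log(m + n)) rather than the O(m + n)
# two-pointer merge possible for lists that are already sorted; simpler, at some asymptotic cost.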
| 361 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class a__ ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self : Tuple, do_rescale : bool = True, rescale_factor : Union[int, float] = 1 / 255, do_pad : bool = True, pad_size : int = 8, **kwargs : Optional[int], ) -> None:
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self : List[Any], image : np.ndarray, scale : float, data_format : Optional[Union[str, ChannelDimension]] = None, **kwargs : int ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs )
    def pad( self : Union[str, Any], image : np.ndarray, size : int, data_format : Optional[Union[str, ChannelDimension]] = None ) -> List[Any]:
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=data_format )
    def preprocess( self : Any, images : ImageInput, do_rescale : Optional[bool] = None, rescale_factor : Optional[float] = None, do_pad : Optional[bool] = None, pad_size : Optional[int] = None, return_tensors : Optional[Union[str, TensorType]] = None, data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs : Any, ) -> List[Any]:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image, data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors )
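# Note (added): `pad` always grows the image to the *next* multiple of `size` — a side that is
# already a multiple of 8 still gains one full extra block of symmetric padding; this matches
# the window-divisibility requirement of Swin-style super-resolution models.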
| 53 | 0 |
def _a ( input_str :str ) -> str:
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
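# Example (added): _a("hello  world again") returns "again world hello"; str.split() with
# no argument also collapses runs of whitespace before the word order is reversed.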
| 0 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
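# Note (added): this is a classic Banker's-algorithm instance — `test_claim_vector` holds the
# total units of each of the 4 resource types, each allocation row is what a process currently
# holds, and each maximum-claim row is the most it may ever request; need = claim - allocation.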
class lowercase_ :
'''simple docstring'''
    def __init__( self : Optional[int] , claim_vector : list[int] , allocated_resources_table : list[list[int]] , maximum_claim_table : list[list[int]] , ) ->None:
        """simple docstring"""
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self : Any ) ->list[int]:
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
    def __available_resources( self : Optional[int] ) ->list[int]:
"""simple docstring"""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
    def __need( self : Union[str, Any] ) ->list[list[int]]:
        """simple docstring"""
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self : Tuple ) ->dict[int, list[int]]:
        """simple docstring"""
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self : Optional[Any] , **kwargs : Any ) ->None:
        """simple docstring"""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('''_''' * 50 + '''\n''' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F"""Process {process_number + 1} is executing.""" )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print('''The process is in a safe state.\n''' )
            else:
                print('''System in unsafe state. Aborting...\n''' )
                break
    def __pretty_data( self : List[Any] ) ->Dict:
"""simple docstring"""
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
F"""P{self.__allocated_resources_table.index(__UpperCAmelCase ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
F"""P{self.__maximum_claim_table.index(__UpperCAmelCase ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
            + ''' '''.join(str(x ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
            + ''' '''.join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
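# Editor's sketch: a self-contained version of the safety check the class above
# implements, written with fresh names since the snippet's identifiers are obfuscated.
def _is_safe_state(available: list, allocation: list, need: list) -> bool:
    """Return True if some execution order lets every process finish."""
    available = list(available)
    pending = list(range(len(allocation)))
    while pending:
        # processes whose remaining need fits in the currently free resources
        runnable = [
            p
            for p in pending
            if all(need[p][r] <= available[r] for r in range(len(available)))
        ]
        if not runnable:
            return False  # nobody can proceed with what is free -> unsafe
        finished = runnable[0]
        # the process completes and releases everything it was holding
        available = [
            available[r] + allocation[finished][r] for r in range(len(available))
        ]
        pending.remove(finished)
    return True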
| 0 | 1 |
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __snake_case ( __lowercase):
"""simple docstring"""
def __lowercase ( self : str ) -> List[str]:
lowerCAmelCase_ : Dict = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowercase ( self : Dict ) -> int:
with self.assertRaises(_a ):
lowerCAmelCase_ : Tuple = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __lowercase ( self : List[str] ) -> Optional[int]:
with self.assertRaises(_a ):
lowerCAmelCase_ : Optional[int] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def __lowercase ( self : str ) -> Any:
lowerCAmelCase_ : Any = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowercase ( self : Union[str, Any] ) -> Optional[int]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCAmelCase_ : str = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def __lowercase ( self : Tuple ) -> Dict:
lowerCAmelCase_ : List[str] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowercase ( self : Optional[int] ) -> int:
lowerCAmelCase_ : str = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def __lowercase ( self : Optional[int] ) -> Tuple:
lowerCAmelCase_ : Tuple = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def __lowercase ( self : Any ) -> Optional[Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCAmelCase_ : Union[str, Any] = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def __lowercase ( self : int ) -> Union[str, Any]:
lowerCAmelCase_ : List[Any] = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def __lowercase ( self : List[Any] ) -> List[str]:
lowerCAmelCase_ : Optional[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __lowercase ( self : Any ) -> int:
import PIL.Image
lowerCAmelCase_ : int = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=_a ) as mock_cast_to_python_objects:
lowerCAmelCase_ : Optional[int] = pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) )
lowerCAmelCase_, lowerCAmelCase_ : str = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , _a )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def UpperCamelCase_ ( A__ : Optional[Any] , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = pa.BufferReader(__a ) if isinstance(__a , pa.Buffer ) else pa.memory_map(__a )
lowerCAmelCase_ : int = pa.ipc.open_stream(__a )
lowerCAmelCase_ : Optional[int] = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCamelCase_ ( A__ : int , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = pa.BufferOutputStream()
lowerCAmelCase_ : Dict = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
lowerCAmelCase_, lowerCAmelCase_ : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ : List[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = pa.BufferOutputStream()
lowerCAmelCase_ : str = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=__a , features=__a ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
lowerCAmelCase_ : Union[str, Any] = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ : Tuple = pa.ipc.open_stream(__a )
lowerCAmelCase_ : Tuple = f.read_all()
lowerCAmelCase_ : List[str] = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__a )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def UpperCamelCase_ ( A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Dict = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def UpperCamelCase_ ( A__ : Any ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
lowerCAmelCase_, lowerCAmelCase_ : int = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def UpperCamelCase_ ( A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
lowerCAmelCase_, lowerCAmelCase_ : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCamelCase_ ( A__ : List[str] , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = pa.BufferOutputStream()
lowerCAmelCase_ : Optional[int] = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
lowerCAmelCase_, lowerCAmelCase_ : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ : Dict = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : List[Any] ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = pa.BufferOutputStream()
lowerCAmelCase_ : str = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ : int = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def UpperCamelCase_ ( A__ : Dict , A__ : Dict ):
'''simple docstring'''
lowerCAmelCase_ : str = pa.BufferOutputStream()
lowerCAmelCase_ : Tuple = pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
lowerCAmelCase_, lowerCAmelCase_ : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ : Any = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def UpperCamelCase_ ( ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : Optional[int] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
lowerCAmelCase_ : Dict = os.path.join(__a , """test.arrow""" )
with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(__a , 1 )
def UpperCamelCase_ ( A__ : str ):
'''simple docstring'''
if pa.types.is_list(__a ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
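# e.g. a nested list<list<int32>> Arrow type unwraps to its primitive int32 dtype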
def UpperCamelCase_ ( A__ : Tuple , A__ : Any ):
'''simple docstring'''
    if isinstance(lst[0] , list ):
change_first_primitive_element_in_list(lst[0] , __a )
else:
        lst[0] = value
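# e.g. change_first_primitive_element_in_list([[1, 2], [3]], 9) mutates the
# nested list in place to [[9, 2], [3]]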
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def UpperCamelCase_ ( A__ : Any , A__ : str , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Dict = pa.array(TypedSequence(__a , optimized_int_type=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def UpperCamelCase_ ( A__ : Tuple , A__ : str , A__ : Optional[int] ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
lowerCAmelCase_ : List[Any] = copy.deepcopy(__a )
lowerCAmelCase_ : int = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__a , __a )
lowerCAmelCase_ : Any = pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def UpperCamelCase_ ( A__ : Dict , A__ : List[str] ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=__a ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def UpperCamelCase_ ( A__ : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = """mock://dataset-train.arrow"""
with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__a ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
lowerCAmelCase_, lowerCAmelCase_ : List[str] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__a )
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : Dict = pa.BufferOutputStream()
with ParquetWriter(stream=__a ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
lowerCAmelCase_, lowerCAmelCase_ : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
lowerCAmelCase_ : List[str] = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ : List[str] = pq.read_table(__a )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def UpperCamelCase_ ( A__ : Tuple , A__ : Dict ):
'''simple docstring'''
import PIL.Image
lowerCAmelCase_ : str = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format="""png""" )
lowerCAmelCase_ : List[Any] = pa.BufferOutputStream()
with ParquetWriter(
stream=__a , features=Features({"""image""": Image()} ) , embed_local_files=__a ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
lowerCAmelCase_ : Optional[int] = pa.BufferReader(output.getvalue() )
lowerCAmelCase_ : Union[str, Any] = pq.read_table(__a )
lowerCAmelCase_ : Optional[int] = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , __a )
with open(__a , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : str = pa.schema([pa.field("""col_1""" , pa.string() , nullable=__a )] )
lowerCAmelCase_ : List[str] = pa.BufferOutputStream()
with ArrowWriter(stream=__a ) as writer:
writer._build_writer(inferred_schema=__a )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
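# Editor's sketch of the round-trip these tests exercise (assumed minimal usage):
#     stream = pa.BufferOutputStream()
#     with ArrowWriter(stream=stream) as writer:
#         writer.write({"col_1": "foo", "col_2": 1})
#         num_examples, num_bytes = writer.finalize()
#     _check_output(stream.getvalue(), expected_num_chunks=1)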
| 350 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Tuple ) -> Dict:
lowerCAmelCase_ : str = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase ) )
def __lowercase ( self : List[Any] ) -> int:
lowerCAmelCase_ : Tuple = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase ) )
def __lowercase ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase_ : int = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase ) )
def __lowercase ( self : int ) -> List[Any]:
lowerCAmelCase_ : Dict = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase ) )
def __lowercase ( self : str ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase ) )
def __lowercase ( self : List[Any] ) -> Tuple:
lowerCAmelCase_ : Union[str, Any] = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCAmelCase_ : Union[str, Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __lowercase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase_ : str = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCAmelCase_ : Optional[int] = """fp16"""
self.assertTrue(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __lowercase ( self : Tuple ) -> Optional[Any]:
# pass variant but use the non-variant filenames
lowerCAmelCase_ : Dict = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
lowerCAmelCase_ : str = """fp16"""
self.assertTrue(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __lowercase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase_ : str = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowerCAmelCase_ : List[str] = """fp16"""
self.assertFalse(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase_ : str = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
lowerCAmelCase_ : Optional[Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __lowercase ( self : List[Any] ) -> List[Any]:
# pass variant but use the non-variant filenames
lowerCAmelCase_ : Dict = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
lowerCAmelCase_ : Any = """fp16"""
self.assertTrue(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __lowercase ( self : Dict ) -> Any:
lowerCAmelCase_ : Optional[int] = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCAmelCase_ : int = """fp16"""
self.assertFalse(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
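# The rule these cases pin down: a checkpoint is safetensors-compatible when every
# PyTorch weight file (with or without a variant suffix such as `.fp16`) has a
# matching `.safetensors` counterpart; variant lookups may fall back to the
# non-variant filenames, as the "pass variant" cases above show.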
| 89 | 0 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowercase__ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
lowercase__ = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
lowercase__ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowercase__ = 'facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowercase__ = 'allenai'
def __a ( _SCREAMING_SNAKE_CASE ) ->str:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
a__: Union[str, Any] = dict((re.sub(r'@@$' , '' , _SCREAMING_SNAKE_CASE ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _SCREAMING_SNAKE_CASE ), v) for k, v in d.items() )
a__: List[str] = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
        da[k] = d[k] # restore
return da
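# Assumed behaviour, mirroring the comment above (editor's example):
#     rewrite_dict_keys({'le@@': 5, 'tt@@': 6, 'er': 7, '<s>': 0})
#     ->  {'le': 5, 'tt': 6, 'er</w>': 7, '<s>': 0}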
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
# prep
assert os.path.exists(_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
a__: Optional[int] = basename(_SCREAMING_SNAKE_CASE )
a__: str = dirname(_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
a__: List[Any] = cls.hub_models()
a__: str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
a__: int = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}' )
a__: List[str] = hub_utils.from_pretrained(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , archive_map=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
a__: int = vars(chkpt['args']['model'] )
a__: int = args['source_lang']
a__: Optional[Any] = args['target_lang']
a__: Optional[int] = dirname(_SCREAMING_SNAKE_CASE )
a__: Optional[int] = basename(_SCREAMING_SNAKE_CASE )
# dicts
a__: Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , F'dict.{src_lang}.txt' )
a__: Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , F'dict.{tgt_lang}.txt' )
a__: int = Dictionary.load(_SCREAMING_SNAKE_CASE )
a__: Tuple = rewrite_dict_keys(src_dict.indices )
a__: Optional[int] = len(_SCREAMING_SNAKE_CASE )
a__: Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab-src.json' )
print(F'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
a__: str = True
for k in src_vocab.keys():
if not k.islower():
a__: int = False
break
a__: List[str] = Dictionary.load(_SCREAMING_SNAKE_CASE )
a__: int = rewrite_dict_keys(tgt_dict.indices )
a__: int = len(_SCREAMING_SNAKE_CASE )
a__: Tuple = os.path.join(_SCREAMING_SNAKE_CASE , 'vocab-tgt.json' )
print(F'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# merges_file (bpecodes)
a__: Dict = os.path.join(_SCREAMING_SNAKE_CASE , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
a__: Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.exists(_SCREAMING_SNAKE_CASE ):
break
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as fin:
a__: Dict = fin.read()
a__: List[Any] = re.sub(r' \d+$' , '' , _SCREAMING_SNAKE_CASE , 0 , re.M ) # remove frequency number
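    # e.g. a bpecodes line "t h 23" becomes "t h": the trailing merge-frequency
    # column is dropped from every line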
print(F'Generating {merges_file}' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as fout:
fout.write(_SCREAMING_SNAKE_CASE )
# model config
a__: List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}'
a__: Any = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
a__: Optional[Any] = 5
a__: Union[str, Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
a__: str = best_score_hparams[model_dir]['length_penalty']
else:
a__: Tuple = 1.0
print(F'Generating {fsmt_model_config_file}' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# tokenizer config
a__: List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: List[Any] = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE , indent=_SCREAMING_SNAKE_CASE ) )
# model
a__: Union[str, Any] = chkpt['models'][0]
a__: str = model.state_dict()
# rename keys to start with 'model.'
a__: Optional[int] = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
a__: Union[str, Any] = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Dict = FSMTConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
a__: int = FSMTForConditionalGeneration(_SCREAMING_SNAKE_CASE )
# check that it loads ok
model_new.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
# save
a__: Union[str, Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase__ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
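# Example invocation (editor's note; the script name and paths are illustrative):
#     python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#         --fsmt_checkpoint_path data/wmt19-en-de/model1.pt \
#         --pytorch_dump_folder_path data/wmt19-en-de-converted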
| 290 | """simple docstring"""
from math import pi, sqrt, tan
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
a__: List[Any] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
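    # ring torus surface area: A = 4 * pi^2 * R * r  (R = torus radius, r = tube radius)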
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
a__: int = (sidea + sidea + sidea) / 2
a__: Tuple = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def __a ( _SCREAMING_SNAKE_CASE ) ->float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float:
    if not isinstance(sides , int ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 290 | 1 |
import numpy as np
class A :
def __init__( self : List[Any] , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Dict=None ) -> List[str]:
"""simple docstring"""
self.set_matricies(red=_lowerCAmelCase , green=_lowerCAmelCase , blue=_lowerCAmelCase , red_edge=_lowerCAmelCase , nir=_lowerCAmelCase )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None ) -> Union[str, Any]:
"""simple docstring"""
if red is not None:
_a = red
if green is not None:
_a = green
if blue is not None:
_a = blue
if red_edge is not None:
_a = red_edge
if nir is not None:
_a = nir
return True
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[Any]="" , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : str=None ) -> List[Any]:
"""simple docstring"""
self.set_matricies(red=_lowerCAmelCase , green=_lowerCAmelCase , blue=_lowerCAmelCase , red_edge=_lowerCAmelCase , nir=_lowerCAmelCase )
_a = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def __lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def __lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def __lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def __lowerCAmelCase ( self : Dict , lowerCAmelCase_ : Optional[int]=0.0_8 , lowerCAmelCase_ : Optional[Any]=1.2_2 , lowerCAmelCase_ : Optional[int]=0.0_3 ) -> List[Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (self.nir / self.green) - 1
def __lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def __lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def __lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
_a = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def __lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
return self.nir - self.green
def __lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
_a = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[Any]=0.1_6 ) -> Dict:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any]=0.5 ) -> int:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def __lowerCAmelCase ( self : int , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[int]=None ) -> List[str]:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def __lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return (self.red + self.green + self.blue) / 3_0.5
def __lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
return self.nir / self.red
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def __lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def __lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_a = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
_a = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
return self.nir / self.red
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def __lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
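# Editor's usage sketch (assumed; the class and its dispatch method carry obfuscated
# names here -- in the original library the entry point is `calculation(index)`):
#     import numpy as np
#     band = np.random.rand(4, 4)
#     cv = A(red=band, green=band, blue=band, nir=band)
#     cv.calculation("NDVI")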
| 354 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 179 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
lowerCAmelCase : Union[str, Any] = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
lowerCAmelCase : Tuple = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
lowerCAmelCase : Tuple = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
lowerCAmelCase : Optional[int] = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
lowerCAmelCase : Any = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
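# Note: `fit_generator` is deprecated in recent TensorFlow releases; `classifier.fit`
# accepts these generators directly and is the preferred call today.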
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
lowerCAmelCase : Tuple = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(64, 64)
)
lowerCAmelCase : Optional[Any] = tf.keras.preprocessing.image.img_to_array(test_image)
lowerCAmelCase : Optional[int] = np.expand_dims(test_image, axis=0)
lowerCAmelCase : Optional[Any] = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
lowerCAmelCase : Tuple = """Normal"""
if result[0][0] == 1:
lowerCAmelCase : List[str] = """Abnormality detected"""
| 13 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] =["image_processor", "tokenizer"]
UpperCAmelCase_ : Tuple ="FlavaImageProcessor"
UpperCAmelCase_ : List[Any] =("BertTokenizer", "BertTokenizerFast")
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> int:
'''simple docstring'''
__snake_case : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCAmelCase , )
__snake_case : List[Any] = kwargs.pop("feature_extractor" )
__snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCAmelCase , UpperCAmelCase )
__snake_case : Tuple = self.image_processor
def __call__( self , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = 0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = False , UpperCAmelCase = True , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__snake_case : Union[str, Any] = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if images is not None:
__snake_case : Union[str, Any] = self.image_processor(
UpperCAmelCase , return_image_mask=UpperCAmelCase , return_codebook_pixels=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
if text is not None and images is not None:
encoding.update(UpperCAmelCase )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase ) , tensor_type=UpperCAmelCase )
def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case : List[Any] = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCAmelCase , )
return self.image_processor
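# Editor's usage sketch (assumed typical call pattern for the processor above):
#     processor = FlavaProcessor(image_processor=image_processor, tokenizer=tokenizer)
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")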
| 326 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a__ ( __lowercase ):
def __init__( self , *A , A=None , A=None , **A ) -> List[Any]:
'''simple docstring'''
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
a = eval_examples
a = post_process_function
def lowerCAmelCase_ ( self , A=None , A=None , A=None , A = "eval" ) -> Tuple:
'''simple docstring'''
a = self.eval_dataset if eval_dataset is None else eval_dataset
a = self.get_eval_dataloader(UpperCAmelCase__ )
a = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
a = self.compute_metrics
a = None
a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a = time.time()
try:
a = eval_loop(
UpperCAmelCase__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , )
finally:
a = compute_metrics
a = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
a = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions )
a = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a = metrics.pop(UpperCAmelCase__ )
metrics.update(output.metrics )
else:
a = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCAmelCase__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
a = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__ )
return metrics
def lowerCAmelCase_ ( self , A , A , A=None , A = "test" ) -> Optional[Any]:
'''simple docstring'''
a = self.get_test_dataloader(UpperCAmelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
a = self.compute_metrics
a = None
a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a = time.time()
try:
a = eval_loop(
UpperCAmelCase__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , )
finally:
a = compute_metrics
a = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
a = self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions , "predict" )
a = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a = metrics.pop(UpperCAmelCase__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__ )
| 361 |
lowercase__ : Optional[int] = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 180 | 0 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Entropy (in nats) of the softmax distribution over the logits ``x``."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
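# Quick sanity check for entropy() (a sketch, not part of the original file):
# for equal logits the expression reduces to log(num_classes), e.g.
#
#   entropy(torch.zeros(1, 4))  # -> tensor([1.3863]) ≈ log(4)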
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        if isinstance(x, (float, int)):
            # One shared threshold for every layer
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            # One threshold per layer
            self.early_exit_entropy = x
    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Tuple = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : Any = all_hidden_states + (hidden_states,)
_lowerCAmelCase : Tuple = layer_module(
__A , __A , head_mask[i] , __A , __A )
_lowerCAmelCase : Optional[int] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Union[str, Any] = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Tuple = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : str = current_outputs + (all_attentions,)
_lowerCAmelCase : Tuple = self.highway[i](__A )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Union[str, Any] = highway_exit[0]
_lowerCAmelCase : int = entropy(__A )
_lowerCAmelCase : Optional[int] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__A , i + 1 )
else:
_lowerCAmelCase : str = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : Optional[int] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : List[Any] = outputs + (all_attentions,)
_lowerCAmelCase : Dict = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : str = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : str = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : Dict = torch.ones(__A , device=__A )
if encoder_attention_mask is None:
_lowerCAmelCase : List[str] = torch.ones(__A , device=__A )
if token_type_ids is None:
_lowerCAmelCase : List[str] = torch.zeros(__A , dtype=torch.long , device=__A )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : Dict = self.get_extended_attention_mask(__A , __A , __A )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Any = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : List[str] = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : List[str] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[int] = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[Any] = self.get_head_mask(__A , self.config.num_hidden_layers )
_lowerCAmelCase : List[str] = self.embeddings(
input_ids=__A , position_ids=__A , token_type_ids=__A , inputs_embeds=__A )
_lowerCAmelCase : Optional[Any] = self.encoder(
__A , attention_mask=__A , head_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )
_lowerCAmelCase : int = encoder_outputs[0]
_lowerCAmelCase : Optional[int] = self.pooler(__A )
_lowerCAmelCase : Tuple = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A shortcut from an intermediate BertLayer's output to the classification head."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : Tuple = self.bert(
__A , attention_mask=__A , token_type_ids=__A , position_ids=__A , head_mask=__A , inputs_embeds=__A , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : int = outputs[1]
_lowerCAmelCase : Any = self.dropout(__A )
_lowerCAmelCase : int = self.classifier(__A )
_lowerCAmelCase : Optional[int] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Optional[Any] = e.message
_lowerCAmelCase : List[str] = e.exit_layer
_lowerCAmelCase : Dict = outputs[0]
if not self.training:
_lowerCAmelCase : List[str] = entropy(__A )
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : List[Any] = CrossEntropyLoss()
_lowerCAmelCase : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : int = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : int = highway_exit[0]
if not self.training:
highway_logits_all.append(__A )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Union[str, Any] = MSELoss()
_lowerCAmelCase : Union[str, Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : str = CrossEntropyLoss()
_lowerCAmelCase : Optional[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__A )
if train_highway:
_lowerCAmelCase : int = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : int = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Union[str, Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Tuple = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
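# Illustrative usage (a sketch, not part of the original file). Inference-time
# early exit is driven by the per-layer entropy thresholds on the encoder; the
# checkpoint path and `inputs` below are placeholders:
#
#   model = DeeBertForSequenceClassification.from_pretrained("path/to/finetuned-deebert")
#   model.bert.encoder.set_early_exit_entropy(0.6)  # one threshold for all layers
#   outputs = model(**inputs)  # may stop at an intermediate layer in eval mode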
| 44 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the Rel position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
# Shared embeddings.
__UpperCamelCase = old['token_embedder/embedding']
# Encoder.
    for i in range(num_layers):
# Block i, layer 0 (Self Attention).
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_attention_layer_norm' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'encoder' , 'attention' )
__UpperCamelCase = layer_norm
__UpperCamelCase = k.T
__UpperCamelCase = o.T
__UpperCamelCase = q.T
__UpperCamelCase = v.T
# Block i, layer 1 (MLP).
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_mlp_layer_norm' )
__UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'encoder' , __lowercase )
__UpperCamelCase = layer_norm
if split_mlp_wi:
__UpperCamelCase = wi[0].T
__UpperCamelCase = wi[1].T
else:
__UpperCamelCase = wi.T
__UpperCamelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCamelCase = tax_relpos_bias_lookup(
__lowercase , __lowercase , 'encoder' ).T
__UpperCamelCase = old['encoder/encoder_norm/scale']
if not scalable_attention:
__UpperCamelCase = tax_relpos_bias_lookup(
__lowercase , 0 , 'encoder' ).T
__UpperCamelCase = tax_relpos_bias_lookup(
__lowercase , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
        for i in range(num_layers):
# Block i, layer 0 (Self Attention).
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_self_attention_layer_norm' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'self_attention' )
__UpperCamelCase = layer_norm
__UpperCamelCase = k.T
__UpperCamelCase = o.T
__UpperCamelCase = q.T
__UpperCamelCase = v.T
# Block i, layer 1 (Cross Attention).
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_cross_attention_layer_norm' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'encoder_decoder_attention' )
__UpperCamelCase = layer_norm
__UpperCamelCase = k.T
__UpperCamelCase = o.T
__UpperCamelCase = q.T
__UpperCamelCase = v.T
# Block i, layer 2 (MLP).
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_mlp_layer_norm' )
__UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'decoder' , __lowercase )
__UpperCamelCase = layer_norm
if split_mlp_wi:
__UpperCamelCase = wi[0].T
__UpperCamelCase = wi[1].T
else:
__UpperCamelCase = wi.T
__UpperCamelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCamelCase = tax_relpos_bias_lookup(__lowercase , __lowercase , 'decoder' ).T
__UpperCamelCase = old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__UpperCamelCase = old['decoder/logits_dense/kernel'].T
return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the model's parameters with the converted T5X parameters."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
a__ : List[Any] =argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
a__ : List[str] =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
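# Example invocation (a sketch, not part of the original file; the script
# filename and all paths are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --scalable_attention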
| 53 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : List[str] = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "levit"
def __init__( self: str , UpperCamelCase: int=2_24 , UpperCamelCase: int=3 , UpperCamelCase: Optional[Any]=3 , UpperCamelCase: Union[str, Any]=2 , UpperCamelCase: str=1 , UpperCamelCase: Any=16 , UpperCamelCase: str=[1_28, 2_56, 3_84] , UpperCamelCase: Dict=[4, 8, 12] , UpperCamelCase: str=[4, 4, 4] , UpperCamelCase: Dict=[16, 16, 16] , UpperCamelCase: Optional[Any]=0 , UpperCamelCase: List[Any]=[2, 2, 2] , UpperCamelCase: Any=[2, 2, 2] , UpperCamelCase: str=0.02 , **UpperCamelCase: Tuple , ):
"""simple docstring"""
super().__init__(**UpperCamelCase )
A__ = image_size
A__ = num_channels
A__ = kernel_size
A__ = stride
A__ = padding
A__ = hidden_sizes
A__ = num_attention_heads
A__ = depths
A__ = key_dim
A__ = drop_path_rate
A__ = patch_size
A__ = attention_ratio
A__ = mlp_ratio
A__ = initializer_range
A__ = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = version.parse("1.11" )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return 1e-4
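# Illustrative usage (a sketch, not part of the original file). The pair of
# classes above mirrors `transformers.LevitConfig` and its ONNX config; with
# the upstream library the equivalent would be:
#
#   from transformers import LevitConfig
#
#   config = LevitConfig()               # LeViT-128S-style defaults
#   print(config.hidden_sizes)           # [128, 256, 384]
#   print(config.num_attention_heads)    # [4, 8, 12]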
| 69 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(_lowerCamelCase )
class a ( _lowerCamelCase ):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Assign labels to the image(s) passed as inputs."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
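# Illustrative usage (a sketch, not part of the original file). This class
# backs the "image-classification" pipeline task; in user code (the image URL
# is a placeholder):
#
#   from transformers import pipeline
#
#   classifier = pipeline("image-classification")
#   classifier("https://example.com/cat.jpg", top_k=3)
#   # -> [{"score": 0.98, "label": "tabby, tabby cat"}, ...]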
| 69 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : Tuple = logging.get_logger(__name__)
lowercase : Tuple = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __snake_case ( lowerCAmelCase ):
_a : Tuple= "vit"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class __snake_case ( lowerCAmelCase ):
_a : Union[str, Any]= version.parse("1.11" )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return 1e-4
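# Illustrative usage (a sketch, not part of the original file), mirroring
# `transformers.ViTConfig`:
#
#   from transformers import ViTConfig
#
#   config = ViTConfig(image_size=384, patch_size=32)
#   print(config.num_hidden_layers)  # 12 (the default)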
| 20 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Any = 'conditional_detr'
lowerCAmelCase : List[str] = ['past_key_values']
lowerCAmelCase : Optional[int] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[int] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=3 ,_UpperCAmelCase : List[Any]=300 ,_UpperCAmelCase : Dict=6 ,_UpperCAmelCase : List[str]=2048 ,_UpperCAmelCase : Optional[int]=8 ,_UpperCAmelCase : List[Any]=6 ,_UpperCAmelCase : Optional[int]=2048 ,_UpperCAmelCase : Dict=8 ,_UpperCAmelCase : int=0.0 ,_UpperCAmelCase : Optional[Any]=0.0 ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : str="relu" ,_UpperCAmelCase : Tuple=256 ,_UpperCAmelCase : Optional[int]=0.1 ,_UpperCAmelCase : str=0.0 ,_UpperCAmelCase : Optional[int]=0.0 ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : List[str]=1.0 ,_UpperCAmelCase : Any=False ,_UpperCAmelCase : int="sine" ,_UpperCAmelCase : List[str]="resnet50" ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : str=False ,_UpperCAmelCase : str=2 ,_UpperCAmelCase : int=5 ,_UpperCAmelCase : Optional[int]=2 ,_UpperCAmelCase : str=1 ,_UpperCAmelCase : Union[str, Any]=1 ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Union[str, Any]=5 ,_UpperCAmelCase : List[Any]=2 ,_UpperCAmelCase : Optional[int]=0.25 ,**_UpperCAmelCase : Tuple ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_a : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : str = backbone_config.get('model_type' )
_a : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
_a : List[Any] = config_class.from_dict(_UpperCAmelCase )
_a : Tuple = use_timm_backbone
_a : Union[str, Any] = backbone_config
_a : List[Any] = num_channels
_a : Union[str, Any] = num_queries
_a : Optional[Any] = d_model
_a : Tuple = encoder_ffn_dim
_a : Dict = encoder_layers
_a : List[str] = encoder_attention_heads
_a : Union[str, Any] = decoder_ffn_dim
_a : Optional[int] = decoder_layers
_a : int = decoder_attention_heads
_a : Optional[int] = dropout
_a : Tuple = attention_dropout
_a : List[Any] = activation_dropout
_a : str = activation_function
_a : Optional[Any] = init_std
_a : Union[str, Any] = init_xavier_std
_a : List[Any] = encoder_layerdrop
_a : List[Any] = decoder_layerdrop
_a : Dict = encoder_layers
_a : List[Any] = auxiliary_loss
_a : Optional[int] = position_embedding_type
_a : List[Any] = backbone
_a : Optional[int] = use_pretrained_backbone
_a : Optional[int] = dilation
# Hungarian matcher
_a : Tuple = class_cost
_a : str = bbox_cost
_a : Any = giou_cost
# Loss coefficients
_a : Tuple = mask_loss_coefficient
_a : Dict = dice_loss_coefficient
_a : Tuple = cls_loss_coefficient
_a : Any = bbox_loss_coefficient
_a : Dict = giou_loss_coefficient
_a : Union[str, Any] = focal_alpha
super().__init__(is_encoder_decoder=_UpperCAmelCase ,**_UpperCAmelCase )
@property
def __lowercase ( self : Dict ):
return self.encoder_attention_heads
@property
def __lowercase ( self : str ):
return self.d_model
def __lowercase ( self : int ):
_a : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_a : Dict = self.backbone_config.to_dict()
_a : Union[str, Any] = self.__class__.model_type
return output
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : str = version.parse('1.11' )
@property
def __lowercase ( self : Dict ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __lowercase ( self : Any ):
return 1E-5
@property
def __lowercase ( self : List[Any] ):
return 12
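# Illustrative usage (a sketch, not part of the original file), mirroring
# `transformers.ConditionalDetrConfig`:
#
#   from transformers import ConditionalDetrConfig
#
#   config = ConditionalDetrConfig(num_queries=100)
#   print(config.hidden_size)  # 256; mapped to `d_model` via `attribute_map`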
| 89 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCamelCase_ ( __lowercase ):
'''simple docstring'''
a__ = '''vivit'''
def __init__( self : Optional[int] , __lowerCamelCase : List[Any]=2_24 , __lowerCamelCase : Optional[int]=32 , __lowerCamelCase : Optional[Any]=[2, 16, 16] , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Any=7_68 , __lowerCamelCase : int=12 , __lowerCamelCase : Dict=12 , __lowerCamelCase : Optional[Any]=30_72 , __lowerCamelCase : Dict="gelu_fast" , __lowerCamelCase : List[str]=0.0 , __lowerCamelCase : List[str]=0.0 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[int]=1e-06 , __lowerCamelCase : Any=True , **__lowerCamelCase : Dict , ) -> str:
A : Union[str, Any] = hidden_size
A : List[Any] = num_hidden_layers
A : Union[str, Any] = num_attention_heads
A : Optional[Any] = intermediate_size
A : List[Any] = hidden_act
A : Union[str, Any] = hidden_dropout_prob
A : Dict = attention_probs_dropout_prob
A : str = initializer_range
A : Tuple = layer_norm_eps
A : Union[str, Any] = image_size
A : Optional[int] = num_frames
A : Dict = tubelet_size
A : List[str] = num_channels
A : Optional[Any] = qkv_bias
        super().__init__(**__lowerCamelCase )
| 363 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
A : Dict = []
A : List[str] = fairseq_model.state_dict()
A : List[Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
A : List[str] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
A : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
A : Dict = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
A : List[str] = True
if "*" in mapped_key:
A : Any = name.split(_lowerCamelCase )[0].split("." )[-2]
A : Union[str, Any] = mapped_key.replace("*" , _lowerCamelCase )
if "weight_g" in name:
A : Optional[Any] = "weight_g"
elif "weight_v" in name:
A : int = "weight_v"
elif "weight" in name:
A : Dict = "weight"
elif "bias" in name:
A : Optional[int] = "bias"
else:
A : Dict = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
A : Optional[int] = full_name.split("conv_layers." )[-1]
A : str = name.split("." )
A : str = int(items[0] )
A : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
A : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
A : Dict = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
A : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
A : str = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowerCamelCase )
def convert_config(model, is_finetuned):
A : Union[str, Any] = SEWConfig()
if is_finetuned:
A : int = model.wav_encoder.wav_model.cfg
else:
A : Any = model.cfg
A : List[str] = fs_config.conv_bias
A : Optional[Any] = eval(fs_config.conv_feature_layers )
A : Dict = [x[0] for x in conv_layers]
A : int = [x[1] for x in conv_layers]
A : Dict = [x[2] for x in conv_layers]
A : Tuple = "gelu"
A : List[Any] = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
A : List[str] = 0.0
A : Union[str, Any] = fs_config.activation_fn.name
A : int = fs_config.encoder_embed_dim
A : List[str] = 0.02
A : Dict = fs_config.encoder_ffn_embed_dim
A : List[str] = 1e-5
A : List[str] = fs_config.encoder_layerdrop
A : Optional[int] = fs_config.encoder_attention_heads
A : Any = fs_config.conv_pos_groups
A : str = fs_config.conv_pos
A : str = len(_lowerCamelCase )
A : Optional[int] = fs_config.encoder_layers
A : List[str] = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
A : int = model.cfg
A : List[str] = fs_config.final_dropout
A : Optional[int] = fs_config.layerdrop
A : Dict = fs_config.activation_dropout
A : Dict = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
A : Dict = fs_config.attention_dropout
A : Optional[Any] = fs_config.dropout_input
A : Union[str, Any] = fs_config.dropout
A : Tuple = fs_config.mask_channel_length
A : int = fs_config.mask_channel_prob
A : Optional[Any] = fs_config.mask_length
A : Union[str, Any] = fs_config.mask_prob
A : int = "Wav2Vec2FeatureExtractor"
A : List[Any] = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
if is_finetuned:
A , A , A : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
A , A , A : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
A : Any = SEWConfig.from_pretrained(_lowerCamelCase )
else:
A : List[str] = convert_config(model[0] , _lowerCamelCase )
A : List[str] = model[0].eval()
A : Optional[int] = True if config.feat_extract_norm == "layer" else False
A : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
if is_finetuned:
if dict_path:
A : Union[str, Any] = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A : str = target_dict.pad_index
A : Dict = target_dict.bos_index
A : List[str] = target_dict.pad_index
A : Any = target_dict.bos_index
A : str = target_dict.eos_index
A : Dict = len(target_dict.symbols )
A : List[Any] = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , _lowerCamelCase )
A : str = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
A : int = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
A : Optional[int] = SEWForCTC(_lowerCamelCase )
else:
A : Dict = SEWModel(_lowerCamelCase )
feature_extractor.save_pretrained(_lowerCamelCase )
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
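# Example invocation (a sketch, not part of the original file; the script
# filename and all paths are placeholders):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/sew_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --config_path /path/to/config.json \
#       --is_finetuned --dict_path /path/to/dict.ltr.txt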
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
| 256 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = TransfoXLTokenizer
_UpperCamelCase : List[str] = False
_UpperCamelCase : int = False
    def setUp(self):
super().setUp()
_lowerCAmelCase : int = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_target = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_target)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_target), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
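# To run this module's tests with pytest (a sketch, not part of the original
# file; the path follows the upstream repository layout and is an assumption):
#
#   python -m pytest tests/models/transfo_xl/test_tokenization_transfo_xl.py -k "move_added_token"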
| 44 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = """bert"""
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def UpperCamelCase__( self ):
'''simple docstring'''
if self.task == "multiple-choice":
__A : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__A : Tuple = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
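# Illustrative usage (a sketch, not part of the original file), mirroring
# `transformers.BertConfig` and the ONNX axes declared above:
#
#   from transformers import BertConfig
#
#   config = BertConfig(hidden_size=512, num_attention_heads=8, intermediate_size=2048)
#   print(config.num_hidden_layers)  # 12 (the default)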
| 179 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random nested list of floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
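# Example (a sketch, not part of the original file): a seeded rng makes the
# output deterministic.
#
#   floats_list((2, 3), rng=random.Random(0))  # 2x3 nested list of floats in [0.0, scale)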
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    """Builds the inputs and feature-extractor kwargs shared by the tests below."""
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCAmelCase_= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_= [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCAmelCase_= [np.asarray(__UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
UpperCAmelCase_= feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
UpperCAmelCase_= feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
# Test batched
UpperCAmelCase_= feat_extract(__UpperCAmelCase , return_tensors="""np""" ).input_values
UpperCAmelCase_= feat_extract(__UpperCAmelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
UpperCAmelCase_= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_= [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCAmelCase_= ["""longest""", """max_length""", """do_not_pad"""]
UpperCAmelCase_= [None, 1_600, None]
for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_= feat_extract(__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors="""np""" )
UpperCAmelCase_= processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
UpperCAmelCase_= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_= range(800 , 1_400 , 200 )
UpperCAmelCase_= [floats_list((1, x) )[0] for x in lengths]
UpperCAmelCase_= ["""longest""", """max_length""", """do_not_pad"""]
UpperCAmelCase_= [None, 1_600, None]
for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_= feat_extract(__UpperCAmelCase , max_length=__UpperCAmelCase , padding=__UpperCAmelCase )
UpperCAmelCase_= processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
UpperCAmelCase_= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_= [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCAmelCase_= feat_extract(
__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=1_000 , padding="""max_length""" , return_tensors="""np""" )
UpperCAmelCase_= processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
UpperCAmelCase_= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_= [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCAmelCase_= feat_extract(
__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=1_000 , padding="""longest""" , return_tensors="""np""" )
UpperCAmelCase_= processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
UpperCAmelCase_= [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCAmelCase_= feat_extract(
__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=2_000 , padding="""longest""" , return_tensors="""np""" )
UpperCAmelCase_= processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
UpperCAmelCase_= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCAmelCase_= np.random.rand(100 ).astype(np.float64 )
UpperCAmelCase_= np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCAmelCase_= feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.float32 )
UpperCAmelCase_= feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCAmelCase_= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_= [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
UpperCAmelCase_= [np.asarray(__UpperCAmelCase ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase_= feature_extractor(audio_target=__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
UpperCAmelCase_= feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
UpperCAmelCase_= feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
# Test batched
UpperCAmelCase_= feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
UpperCAmelCase_= feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase_= [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase_= np.asarray(__UpperCAmelCase )
UpperCAmelCase_= feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
UpperCAmelCase_= feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
UpperCAmelCase_= self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_= self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_= feat_extract.model_input_names[0]
UpperCAmelCase_= BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) for x, y in zip(__UpperCAmelCase , processed_features[input_name] ) ) )
UpperCAmelCase_= self.feat_extract_tester.prepare_inputs_for_target(equal_length=__UpperCAmelCase )
UpperCAmelCase_= BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
UpperCAmelCase_= processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase_= batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_= self.feat_extract_tester.prepare_inputs_for_target(equal_length=__UpperCAmelCase )
UpperCAmelCase_= self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_= feat_extract.model_input_names[0]
UpperCAmelCase_= BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
UpperCAmelCase_= processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase_= batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
UpperCAmelCase_= self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_= self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_= feat_extract.model_input_names[0]
UpperCAmelCase_= BatchFeature({input_name: speech_inputs} )
UpperCAmelCase_= feat_extract.num_mel_bins # hack!
UpperCAmelCase_= feat_extract.pad(__UpperCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
UpperCAmelCase_= feat_extract.pad(__UpperCAmelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
UpperCAmelCase_= self.feat_extract_dict
UpperCAmelCase_= True
UpperCAmelCase_= self.feature_extraction_class(**__UpperCAmelCase )
UpperCAmelCase_= self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_= [len(__UpperCAmelCase ) for x in speech_inputs]
UpperCAmelCase_= feat_extract.model_input_names[0]
UpperCAmelCase_= BatchFeature({input_name: speech_inputs} )
UpperCAmelCase_= feat_extract.num_mel_bins # hack!
UpperCAmelCase_= feat_extract.pad(__UpperCAmelCase , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
UpperCAmelCase_= self.feat_extract_dict
UpperCAmelCase_= True
UpperCAmelCase_= self.feature_extraction_class(**__UpperCAmelCase )
UpperCAmelCase_= self.feat_extract_tester.prepare_inputs_for_target()
UpperCAmelCase_= [len(__UpperCAmelCase ) for x in speech_inputs]
UpperCAmelCase_= feat_extract.model_input_names[0]
UpperCAmelCase_= BatchFeature({input_name: speech_inputs} )
UpperCAmelCase_= min(__UpperCAmelCase )
UpperCAmelCase_= feat_extract.num_mel_bins # hack!
UpperCAmelCase_= feat_extract.pad(
__UpperCAmelCase , padding="""max_length""" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def _SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : Union[str, Any] ) -> List[str]:
from datasets import load_dataset
UpperCAmelCase_= load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
UpperCAmelCase_= ds.sort("""id""" ).select(range(__UpperCAmelCase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
# fmt: off
UpperCAmelCase_= torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
UpperCAmelCase_= self._load_datasamples(1 )
UpperCAmelCase_= SpeechTaFeatureExtractor()
UpperCAmelCase_= feature_extractor(__UpperCAmelCase , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 93_680) )
self.assertTrue(torch.allclose(input_values[0, :30] , __UpperCAmelCase , atol=1E-6 ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
# fmt: off
UpperCAmelCase_= torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
UpperCAmelCase_= self._load_datasamples(1 )
UpperCAmelCase_= SpeechTaFeatureExtractor()
UpperCAmelCase_= feature_extractor(audio_target=__UpperCAmelCase , return_tensors="""pt""" ).input_values
self.assertEqual(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , __UpperCAmelCase , atol=1E-4 ) )
| 277 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    """Declare a list-valued dataclass field with a mutable default."""
    return field(default_factory=lambda: default, metadata=metadata)
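

# Why the factory above: dataclasses reject mutable defaults such as lists,
# so the helper wraps the value in a `default_factory` closure. A minimal,
# hedged illustration of the same pattern (`_ExampleArgs` is hypothetical):
@dataclass
class _ExampleArgs:
    sizes: List[int] = list_field(default=[8, 32])


assert _ExampleArgs().sizes == [8, 32]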
@dataclass
class lowercase :
"""simple docstring"""
a__ : List[str] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
a__ : List[int] = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"})
a__ : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
a__ : bool = field(
default=snake_case__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
a__ : bool = field(
default=snake_case__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
a__ : bool = field(
default=snake_case__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."})
a__ : bool = field(default=snake_case__ , metadata={"help": "Use FP16 to accelerate inference."})
a__ : bool = field(default=snake_case__ , metadata={"help": "Benchmark training of model"})
a__ : bool = field(default=snake_case__ , metadata={"help": "Verbose memory tracing"})
a__ : bool = field(
default=snake_case__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
a__ : bool = field(
default=snake_case__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
a__ : bool = field(default=snake_case__ , metadata={"help": "Trace memory line by line"})
a__ : bool = field(default=snake_case__ , metadata={"help": "Save result to a CSV file"})
a__ : bool = field(default=snake_case__ , metadata={"help": "Save all print statements in a log file"})
a__ : bool = field(default=snake_case__ , metadata={"help": "Whether to print environment information"})
a__ : bool = field(
default=snake_case__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
a__ : str = field(
default=F'inference_time_{round(time())}.csv' , metadata={"help": "CSV filename used if saving time results to csv."} , )
a__ : str = field(
default=F'inference_memory_{round(time())}.csv' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
a__ : str = field(
default=F'train_time_{round(time())}.csv' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
a__ : str = field(
default=F'train_memory_{round(time())}.csv' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
a__ : str = field(
default=F'env_info_{round(time())}.csv' , metadata={"help": "CSV filename used if saving environment information."} , )
a__ : str = field(
default=F'log_{round(time())}.csv' , metadata={"help": "Log filename used if print statements are saved in log."} , )
a__ : int = field(default=3 , metadata={"help": "Times an experiment will be run."})
a__ : bool = field(
default=snake_case__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
warnings.warn(
F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
""" are deprecated in general and it is advised to use external Benchmarking libraries """
""" to benchmark Transformer models.""" , __UpperCAmelCase , )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
if len(self.models ) <= 0:
raise ValueError(
"""Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
""" bert-base-cased` or `args.models = ['bert-base-cased'].""" )
return self.models
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("""Multiprocessing is currently not possible on TPU.""" )
return False
else:
return True
| 277 | 1 |
def or_gate(input_a: int, input_b: int) -> int:
    """Return 1 if at least one of the two binary inputs is 1, else 0."""
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    """Exercise the full OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
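

# A hedged extension, not part of the original snippet: the same truthiness
# trick generalizes to any number of inputs via `any`; `multi_or_gate` is a
# hypothetical helper name.
def multi_or_gate(*inputs: int) -> int:
    """Return 1 if at least one binary input is 1, else 0."""
    return int(any(inputs))


assert multi_or_gate(0, 0, 0) == 0
assert multi_or_gate(0, 1, 0) == 1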
| 51 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Fast BERT tokenizer backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_="[UNK]" , lowerCAmelCase_="[SEP]" , lowerCAmelCase_="[PAD]" , lowerCAmelCase_="[CLS]" , lowerCAmelCase_="[MASK]" , lowerCAmelCase_=True , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> List[str]:
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , )
_A = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCAmelCase_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCAmelCase_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase_ ) != tokenize_chinese_chars
):
_A = getattr(lowerCAmelCase_ , normalizer_state.pop("""type""" ) )
_A = do_lower_case
_A = strip_accents
_A = tokenize_chinese_chars
_A = normalizer_class(**lowerCAmelCase_ )
_A = do_lower_case
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> List[str]:
_A = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> List[int]:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
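    # Illustration with hypothetical ids: for a first sequence [5, 6] and a
    # second sequence [7], the layout is [CLS] 5 6 [SEP] 7 [SEP] and the
    # mask above is [0, 0, 0, 0, 1, 1] -- zeros cover the first segment
    # (including [CLS] and its [SEP]), ones cover the second.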
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> Tuple[str]:
_A = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
| 180 | 0 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_lowercase : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
def __init__( self : str , *_lowercase : Tuple , **_lowercase : List[Any] ):
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(_lowercase )
def a ( self : int , _lowercase : Dict=None , _lowercase : List[Any]=None , _lowercase : int=None , **_lowercase : Dict ):
__UpperCAmelCase , __UpperCAmelCase = {}, {}
if padding is not None:
__UpperCAmelCase = padding
if truncation is not None:
__UpperCAmelCase = truncation
if top_k is not None:
__UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[str] , _lowercase : Union["Image.Image", str] , _lowercase : str = None , **_lowercase : Optional[Any] ):
if isinstance(_lowercase , (Image.Image, str) ) and isinstance(_lowercase , _lowercase ):
__UpperCAmelCase = {'''image''': image, '''question''': question}
else:
__UpperCAmelCase = image
__UpperCAmelCase = super().__call__(_lowercase , **_lowercase )
return results
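    # A hedged usage sketch (paths and outputs are hypothetical):
    # >>> from transformers import pipeline
    # >>> vqa = pipeline("visual-question-answering")
    # >>> vqa(image="path/to/image.png", question="What is shown?", top_k=2)
    # [{'score': 0.91, 'answer': '...'}, {'score': 0.04, 'answer': '...'}]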
def a ( self : Union[str, Any] , _lowercase : List[str] , _lowercase : Any=False , _lowercase : Union[str, Any]=False ):
__UpperCAmelCase = load_image(inputs['''image'''] )
__UpperCAmelCase = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_lowercase , truncation=_lowercase )
__UpperCAmelCase = self.image_processor(images=_lowercase , return_tensors=self.framework )
model_inputs.update(_lowercase )
return model_inputs
def a ( self : Optional[Any] , _lowercase : str ):
__UpperCAmelCase = self.model(**_lowercase )
return model_outputs
def a ( self : str , _lowercase : Optional[int] , _lowercase : Any=5 ):
if top_k > self.model.config.num_labels:
__UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
__UpperCAmelCase = model_outputs.logits.sigmoid()[0]
__UpperCAmelCase , __UpperCAmelCase = probs.topk(_lowercase )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
__UpperCAmelCase = scores.tolist()
__UpperCAmelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_lowercase , _lowercase )]
| 86 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def a ( self : Optional[int] ):
super().setUp()
__UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for i, value in enumerate(_lowercase ):
__UpperCAmelCase = i
__UpperCAmelCase = i
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(_lowercase , _lowercase , ensure_ascii=_lowercase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(_lowercase , _lowercase , ensure_ascii=_lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCAmelCase = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(_lowercase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )
def a ( self : List[Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a ( self : Optional[Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def a ( self : List[Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a ( self : Optional[Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def a ( self : Optional[int] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a ( self : Any ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def a ( self : int ):
__UpperCAmelCase = RoCBertBasicTokenizer(do_lower_case=_lowercase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def a ( self : Optional[Any] ):
__UpperCAmelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__UpperCAmelCase = {}
for i, token in enumerate(_lowercase ):
__UpperCAmelCase = i
__UpperCAmelCase = RoCBertWordpieceTokenizer(vocab=_lowercase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def a ( self : Union[str, Any] ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def a ( self : Dict ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def a ( self : Optional[int] ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def a ( self : Tuple ):
__UpperCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowercase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
__UpperCAmelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_lowercase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def a ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__UpperCAmelCase = tokenizer_r.encode_plus(
_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase , )
__UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(_lowercase , '''do_lower_case''' ) else False
__UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def a ( self : Dict ):
__UpperCAmelCase = ['''的''', '''人''', '''有''']
__UpperCAmelCase = ''''''.join(_lowercase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = True
__UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(_lowercase )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(_lowercase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , _lowercase )
__UpperCAmelCase = False
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
__UpperCAmelCase = tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(_lowercase )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(_lowercase )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCAmelCase = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_lowercase )
]
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def a ( self : List[Any] ):
__UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCAmelCase = tokenizer.encode('''你好''' , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer.encode('''你是谁''' , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_lowercase )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def a ( self : List[str] ):
__UpperCAmelCase = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCAmelCase = '''你好,你是谁'''
__UpperCAmelCase = tokenizer.tokenize(_lowercase )
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(_lowercase )
__UpperCAmelCase = tokenizer.convert_tokens_to_shape_ids(_lowercase )
__UpperCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(_lowercase )
__UpperCAmelCase = tokenizer.prepare_for_model(
_lowercase , _lowercase , _lowercase , add_special_tokens=_lowercase )
__UpperCAmelCase = tokenizer.encode_plus(_lowercase , add_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
| 86 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
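
# Invariant maintained by the class above: whenever the list is non-empty,
# self.tail.next is self.head, so traversal wraps around instead of reaching
# None; __iter__ stops after one full lap by checking `node == self.head`.
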
def test_circular_linked_list() -> None:
    """
    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
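# Note on the fallbacks above: when sentencepiece or tokenizers is not
# installed, dummy placeholder classes are re-exported under the same public
# names, so `from transformers import MT5Tokenizer` still resolves and only
# raises a helpful ImportError once the object is actually used.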
_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mt5'''] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mt5'''] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mt5'''] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 69 | 1 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 1_6000) -> np.ndarray:
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
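

# A hedged usage sketch with hypothetical values (not part of the original
# script, left uncalled; invoke manually to verify): a 30-second clip at
# 16 kHz is cropped to a random 20-second window, while a clip already
# shorter than the budget is returned unchanged.
def _demo_random_subsample() -> None:
    long_wav = np.zeros(30 * 1_6000, dtype=np.float32)
    assert len(random_subsample(long_wav, max_length=20.0)) == 20 * 1_6000
    short_wav = np.zeros(5 * 1_6000, dtype=np.float32)
    assert random_subsample(short_wav, max_length=20.0) is short_wav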
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =field(default=lowerCamelCase_ , metadata={'help': 'Name of a dataset from the datasets package'} )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'A file containing the training audio paths and labels.'} )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'A file containing the validation audio paths and labels.'} )
__a =field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
__a =field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
__a =field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
__a =field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
__a =field(
default=lowerCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__a =field(
default=lowerCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
__a =field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
__a =field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'Name or path of preprocessor config.'} )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
__a =field(
default=lowerCamelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def UpperCamelCase__ ( self : Tuple ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , __a , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_a , _a , _a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_a , _a , _a = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , lowercase , lowercase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_a = training_args.get_process_log_level()
logger.setLevel(lowercase )
transformers.utils.logging.set_verbosity(lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_a = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_a = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
_a = DatasetDict()
_a = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_a = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_a = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_a = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_a = feature_extractor.model_input_names[0]
def train_transforms(lowercase : Tuple ):
_a = []
for audio in batch[data_args.audio_column_name]:
_a = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowercase )
_a = feature_extractor(lowercase , sampling_rate=feature_extractor.sampling_rate )
_a = {model_input_name: inputs.get(lowercase )}
_a = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowercase : Dict ):
_a = [audio["array"] for audio in batch[data_args.audio_column_name]]
_a = feature_extractor(lowercase , sampling_rate=feature_extractor.sampling_rate )
_a = {model_input_name: inputs.get(lowercase )}
_a = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_a = raw_datasets["train"].features[data_args.label_column_name].names
_a , _a = {}, {}
for i, label in enumerate(lowercase ):
_a = str(lowercase )
_a = label
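    # For illustration with hypothetical labels: names = ["dog", "cat"] yields
    # label2id = {"dog": "0", "cat": "1"} and id2label = {"0": "dog", "1": "cat"};
    # the string keys and values match what AutoConfig serializes to config.json.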
# Load the accuracy metric from the datasets package
_a = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase : Dict ):
_a = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase , references=eval_pred.label_ids )
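    # For illustration with hypothetical numbers: predictions [[0.1, 2.0],
    # [1.5, 0.2]] argmax along axis 1 to [1, 0], which the accuracy metric
    # then compares element-wise against eval_pred.label_ids.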
_a = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase ) , labelaid=lowercase , idalabel=lowercase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_a = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_a = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowercase , output_all_columns=lowercase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_a = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowercase , output_all_columns=lowercase )
# Initialize our trainer
_a = Trainer(
model=lowercase , args=lowercase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=lowercase , tokenizer=lowercase , )
# Training
if training_args.do_train:
_a = None
if training_args.resume_from_checkpoint is not None:
_a = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_a = last_checkpoint
_a = trainer.train(resume_from_checkpoint=lowercase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_a = trainer.evaluate()
trainer.log_metrics("eval" , lowercase )
trainer.save_metrics("eval" , lowercase )
# Write model card and (optionally) push to hub
_a = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase )
else:
trainer.create_model_card(**lowercase )
if __name__ == "__main__":
main()
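# Invocation sketch (my addition; the dataset, model name and paths are
# illustrative assumptions, not taken from this file):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-keyword-spotting \
#       --do_train --do_eval --max_length_seconds 1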
| 346 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
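# Example (my addition, an assumption about typical usage of the hook above):
#
#   pytest tests/ --make-reports=my_run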
| 346 | 1 |
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with two stacks."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
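# Extra sanity checks (my addition): the operand order restored above,
# operators[opr](num2, num1), matters for the non-commutative operators.
if __name__ == "__main__":
    assert dijkstras_two_stack_algorithm("(5 - (1 + 1))") == 3
    assert dijkstras_two_stack_algorithm("(8 / (2 + 2))") == 2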
| 7 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("""ct x y z""")
def beta(velocity: float) -> float:
    """Ratio of a velocity to the speed of light."""
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''')
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''')
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor: 1 / sqrt(1 - beta(v)**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix along the x-axis for the four-vector (ct, x, y, z)."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print("""Example of four vector: """)
print(F'''ct\' = {four_vector[0]}''')
print(F'''x\' = {four_vector[1]}''')
print(F'''y\' = {four_vector[2]}''')
print(F'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'''\n{numerical_vector}''')
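    # Additional illustration (my addition): the Lorentz factor is ~1 at
    # everyday speeds and diverges as v approaches c.
    for v in (3.0e7, 1.5e8, 2.9e8):
        print(f"gamma({v:.1e} m/s) = {gamma(v):.4f}")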
| 256 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase: int = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self):
        dset = Dataset.from_dict(
            {"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]}
        )
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index('filename', es_client=es_client)
            scores, examples = dset.get_nearest_examples('filename', 'my_name-train_29')
            self.assertEqual(examples['filename'][0], 'my_name-train_29')
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fsspec(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = 'index.faiss'
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
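# Usage sketch (my addition, mirroring the API exercised by these tests):
#
#   index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#   index.add_vectors(np.eye(5, dtype=np.float32))
#   scores, ids = index.search(np.ones(5, dtype=np.float32))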
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])

            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 96 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)
    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 96 | 1 |
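# Usage sketch (my addition; names as restored above):
#
#   processor = GLPNImageProcessor(size_divisor=32)
#   batch = processor.preprocess(pil_image, return_tensors="np")
#   # pixel_values: (3, H//32*32, W//32*32), rescaled to [0, 1]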
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
base = int(input("Enter the base: ").strip())
exponent = int(input("Enter the exponent: ").strip())
result = power(base, abs(exponent))
if exponent < 0:  # power() does not properly deal w/ negative exponents
    result = 1 / result
print(f"{base} to the power of {exponent} is {result}")
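# Iterative alternative (my addition): square-and-multiply needs O(log n)
# multiplications and avoids Python's recursion limit for large exponents.
def power_iterative(base: int, exponent: int) -> int:
    """Assumes a non-negative integer exponent."""
    result = 1
    while exponent:
        if exponent & 1:
            result *= base
        base *= base
        exponent >>= 1
    return result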
| 277 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')
        ):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')
        ):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
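# Quick check (my addition): for a uniform distribution over k outcomes the
# entropy is log(k), e.g. entropy(torch.full((4,), 0.25)) ~= log(4) ~= 1.386.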
def print_ad_tensor(tensor):
    """Print a 2D tensor"""
    logger.info('lv, h >\t' + '\t'.join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + '\t'.join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and importance scores (Michel et al., http://arxiv.org/abs/1905.10650)."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies')
        print_ad_tensor(attn_entropy)

    if compute_importance:
        logger.info('Head importance scores')
        print_ad_tensor(head_importance)

    logger.info('Head ranked by importance scores')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
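# Note (my addition): with the mask held at 1, |d(loss)/d(mask_h)| accumulated
# over the eval set scores how much each attention head contributes to the LM
# loss - the importance proxy from Michel et al. 2019.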
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) in order of increasing importance until the score drops below a threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print('BREAK BY num_to_mask')
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)',
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info('Final head mask')
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune (actually remove) the masked head weights and measure the speed/score effect."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)',
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
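# Invocation sketch (my addition; the script filename and data path are
# illustrative assumptions - the script expects token ids loadable via
# np.loadtxt):
#
#   python run_prune_gpt.py --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt --output_dir ./pruned \
#       --try_masking --masking_threshold 0.9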
| 277 | 1 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None
    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
lowercase_ :List[str] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''feature_size''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''sampling_rate''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''padding_value''' ) )
    def test_batch_feature(self):
lowercase_ :Tuple = self.feat_extract_tester.prepare_inputs_for_common()
lowercase_ :List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ :Optional[int] = feat_extract.model_input_names[0]
lowercase_ :Any = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) for x, y in zip(UpperCamelCase_ , processed_features[input_name] ) ) )
lowercase_ :Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase_ )
lowercase_ :List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
lowercase_ :Optional[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase_ :Tuple = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
    def test_batch_feature_pt(self):
lowercase_ :List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase_ )
lowercase_ :int = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ :Dict = feat_extract.model_input_names[0]
lowercase_ :str = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
lowercase_ :Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase_ :Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
    def test_batch_feature_tf(self):
lowercase_ :List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCamelCase_ )
lowercase_ :str = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ :Dict = feat_extract.model_input_names[0]
lowercase_ :List[str] = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
lowercase_ :List[str] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase_ :Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
lowercase_ :Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ :Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase_ )
lowercase_ :Tuple = feat_extract.model_input_names[0]
lowercase_ :Union[str, Any] = BatchFeature({input_name: speech_inputs} )
lowercase_ :List[str] = self.feat_extract_tester.seq_length_diff
lowercase_ :Any = self.feat_extract_tester.max_seq_length + pad_diff
lowercase_ :List[str] = self.feat_extract_tester.min_seq_length
lowercase_ :List[str] = self.feat_extract_tester.batch_size
lowercase_ :Union[str, Any] = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase_ :Any = feat_extract.pad(UpperCamelCase_ , padding=UpperCamelCase_ )
lowercase_ :Any = input_a[input_name]
lowercase_ :List[str] = feat_extract.pad(UpperCamelCase_ , padding='''longest''' )
lowercase_ :Dict = input_a[input_name]
lowercase_ :int = feat_extract.pad(UpperCamelCase_ , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
lowercase_ :List[str] = input_a[input_name]
lowercase_ :Any = feat_extract.pad(UpperCamelCase_ , padding='''longest''' , return_tensors='''np''' )
lowercase_ :Any = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(UpperCamelCase_ ):
feat_extract.pad(UpperCamelCase_ , padding='''max_length''' )[input_name]
lowercase_ :List[Any] = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , max_length=UpperCamelCase_ , return_tensors='''np''' )
lowercase_ :List[Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(UpperCamelCase_ ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) )
self.assertTrue(_inputs_are_equal(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase_ :List[Any] = feat_extract.pad(UpperCamelCase_ , pad_to_multiple_of=10 )
lowercase_ :Optional[Any] = input_a[input_name]
lowercase_ :List[Any] = feat_extract.pad(UpperCamelCase_ , padding='''longest''' , pad_to_multiple_of=10 )
lowercase_ :Optional[Any] = input_a[input_name]
lowercase_ :Union[str, Any] = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=UpperCamelCase_ )
lowercase_ :Union[str, Any] = input_a[input_name]
lowercase_ :List[str] = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=UpperCamelCase_ , return_tensors='''np''' , )
lowercase_ :Tuple = input_a[input_name]
self.assertTrue(all(len(UpperCamelCase_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(UpperCamelCase_ , UpperCamelCase_ ) )
lowercase_ :Optional[int] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(UpperCamelCase_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
lowercase_ :Union[str, Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
lowercase_ :Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ :Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCamelCase_ )
lowercase_ :Union[str, Any] = feat_extract.model_input_names[0]
lowercase_ :Optional[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
lowercase_ :Dict = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=UpperCamelCase_ )
lowercase_ :Union[str, Any] = input_a[input_name]
lowercase_ :str = feat_extract.pad(UpperCamelCase_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
lowercase_ :Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) )
self.assertFalse(_inputs_have_equal_length(UpperCamelCase_ ) )
# truncate to smallest with np
lowercase_ :int = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=UpperCamelCase_ , )
lowercase_ :int = input_a[input_name]
lowercase_ :List[Any] = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
lowercase_ :Union[str, Any] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase_ ) )
# truncate to middle
lowercase_ :Optional[Any] = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=UpperCamelCase_ , return_tensors='''np''' , )
lowercase_ :List[Any] = input_a[input_name]
lowercase_ :List[str] = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=UpperCamelCase_ )
lowercase_ :str = input_a[input_name]
lowercase_ :Optional[int] = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
lowercase_ :int = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) )
self.assertTrue(_inputs_are_equal(UpperCamelCase_ , UpperCamelCase_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(UpperCamelCase_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase_ ):
feat_extract.pad(UpperCamelCase_ , truncation=UpperCamelCase_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase_ ):
feat_extract.pad(UpperCamelCase_ , padding='''longest''' , truncation=UpperCamelCase_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(UpperCamelCase_ ):
feat_extract.pad(UpperCamelCase_ , padding='''longest''' , truncation=UpperCamelCase_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(UpperCamelCase_ ):
feat_extract.pad(UpperCamelCase_ , padding='''max_length''' , truncation=UpperCamelCase_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase_ :List[str] = 12
lowercase_ :Any = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCamelCase_ , truncation=UpperCamelCase_ , )
lowercase_ :str = input_a[input_name]
lowercase_ :Tuple = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCamelCase_ , )
lowercase_ :Union[str, Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase_ :Optional[int] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
lowercase_ :str = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(UpperCamelCase_ ) )
self.assertFalse(_inputs_have_equal_length(UpperCamelCase_ ) )
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
    def test_padding_accepts_tensors_pt(self):
lowercase_ :int = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ :int = self.feat_extract_tester.prepare_inputs_for_common()
lowercase_ :Union[str, Any] = feat_extract.model_input_names[0]
lowercase_ :int = BatchFeature({input_name: speech_inputs} )
lowercase_ :List[str] = feat_extract.pad(UpperCamelCase_ , padding='''longest''' , return_tensors='''np''' )[input_name]
lowercase_ :Optional[int] = feat_extract.pad(UpperCamelCase_ , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
    def test_padding_accepts_tensors_tf(self):
lowercase_ :List[str] = self.feature_extraction_class(**self.feat_extract_dict )
lowercase_ :str = self.feat_extract_tester.prepare_inputs_for_common()
lowercase_ :List[str] = feat_extract.model_input_names[0]
lowercase_ :List[Any] = BatchFeature({input_name: speech_inputs} )
lowercase_ :Dict = feat_extract.pad(UpperCamelCase_ , padding='''longest''' , return_tensors='''np''' )[input_name]
lowercase_ :List[Any] = feat_extract.pad(UpperCamelCase_ , padding='''longest''' , return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
    def test_attention_mask(self):
lowercase_ :Union[str, Any] = self.feat_extract_dict
lowercase_ :List[Any] = True
lowercase_ :Optional[Any] = self.feature_extraction_class(**UpperCamelCase_ )
lowercase_ :Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
lowercase_ :List[Any] = [len(UpperCamelCase_ ) for x in speech_inputs]
lowercase_ :Any = feat_extract.model_input_names[0]
lowercase_ :Dict = BatchFeature({input_name: speech_inputs} )
lowercase_ :List[str] = feat_extract.pad(UpperCamelCase_ , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , UpperCamelCase_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCamelCase_ )
    def test_attention_mask_with_truncation(self):
lowercase_ :int = self.feat_extract_dict
lowercase_ :List[str] = True
lowercase_ :Dict = self.feature_extraction_class(**UpperCamelCase_ )
lowercase_ :List[str] = self.feat_extract_tester.prepare_inputs_for_common()
lowercase_ :List[str] = [len(UpperCamelCase_ ) for x in speech_inputs]
lowercase_ :List[Any] = feat_extract.model_input_names[0]
lowercase_ :Tuple = BatchFeature({input_name: speech_inputs} )
lowercase_ :Any = min(UpperCamelCase_ )
lowercase_ :Optional[int] = feat_extract.pad(
UpperCamelCase_ , padding='''max_length''' , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors='''np''' )
self.assertIn('''attention_mask''' , UpperCamelCase_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 252 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ['bs4'])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, 'html.parser')

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError('Number of doc strings and xtags does not correspond')
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError('Number of doc strings and xsubs does not correspond')

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ''
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                'HTML strings must of type `str`, `List[str]` (batch of examples), '
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {'nodes': nodes, 'xpaths': xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
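# Usage sketch (my addition; names as restored above):
#
#   fe = MarkupLMFeatureExtractor()
#   out = fe("<html><body><p>Hello</p></body></html>")
#   out["nodes"]   -> [["Hello"]]
#   out["xpaths"]  -> [["/html/body/p"]]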
| 252 | 1 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn='gelu', attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
| 86 |
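# Shape sketch (my addition, illustrative numbers): for a CLIP vision config
# with hidden_size=1024 and proj_size=768, pixel_values of shape (B, 3, H, W)
# map to latent_states of shape (B, 1, 768).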
"""simple docstring"""
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
    print(f'{solution() = }')
| 86 | 1 |
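# Note (my addition): the loop builds the continued-fraction convergents of e,
# whose partial denominators follow [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]; e.g. the
# 10th convergent is 1457/536, so solution(10) returns 1 + 4 + 5 + 7 = 17.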
'''simple docstring'''
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 351 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info('''Initializing the config with a `BiT` backbone.''')
                backbone_config = {
                    '''global_padding''': '''same''',
                    '''layer_type''': '''bottleneck''',
                    '''depths''': [3, 4, 9],
                    '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
                    '''embedding_dynamic_padding''': True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('''Initializing the config with a `BiT` backbone.''')
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError('''Readout type must be 'project' when using `DPT-hybrid` mode.''')
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of ['ignore', 'add', 'project']''')
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual

        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
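# Usage note (my addition): DPTConfig() gives the pure-ViT variant; passing
# is_hybrid=True (optionally with a backbone_config dict) builds the
# DPT-hybrid model with the BiT backbone initialized above.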
| 217 | 0 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
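# Worked example (my addition): at a 16 kHz sampling rate, max_length=1.0
# keeps 16000 samples, so a 2 s clip (32000 samples) is cropped to a random
# 16000-sample window.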
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
lowerCAmelCase_ : Optional[str] = field(default=lowerCamelCase_ , metadata={"""help""": """Name of a dataset from the datasets package"""} )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCamelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCamelCase_ , metadata={"""help""": """A file containing the training audio paths and labels."""} )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCamelCase_ , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
lowerCAmelCase_ : str = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
lowerCAmelCase_ : str = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
lowerCAmelCase_ : str = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
lowerCAmelCase_ : str = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCamelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowerCAmelCase_ : Optional[int] = field(
default=lowerCamelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
lowerCAmelCase_ : float = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
lowerCAmelCase_ : str = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
lowerCAmelCase_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowerCAmelCase_ : Optional[str] = field(
default=lowerCamelCase_ , metadata={"""help""": """Name or path of preprocessor config."""} )
lowerCAmelCase_ : bool = field(
default=lowerCamelCase_ , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
lowerCAmelCase_ : bool = field(
default=lowerCamelCase_ , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
lowerCAmelCase_ : bool = field(
default=lowerCamelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
lowerCAmelCase_ : Optional[bool] = field(
default=lowerCamelCase_ , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
lowerCAmelCase_ : bool = field(
default=lowerCamelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , _UpperCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def _UpperCamelCase ( ):
'''simple docstring'''
UpperCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
UpperCAmelCase__ = DatasetDict()
UpperCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F'''{', '.join(raw_datasets['train'].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--label_column_name` to the correct text column - one of """
F'''{', '.join(raw_datasets['train'].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCAmelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCAmelCase__ = feature_extractor.model_input_names[0]
def train_transforms(SCREAMING_SNAKE_CASE__ : str ):
UpperCAmelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCAmelCase__ = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase__ = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )}
UpperCAmelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
UpperCAmelCase__ = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
UpperCAmelCase__ = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase__ = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )}
UpperCAmelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCAmelCase__ = raw_datasets["""train"""].features[data_args.label_column_name].names
UpperCAmelCase__ , UpperCAmelCase__ = {}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = str(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = label
# Load the accuracy metric from the datasets package
UpperCAmelCase__ = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE__ : Any ):
UpperCAmelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE__ , references=eval_pred.label_ids )
UpperCAmelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE__ ) , labelaid=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase__ = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase__ = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ )
# Initialize our trainer
UpperCAmelCase__ = Trainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , )
# Training
if training_args.do_train:
UpperCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase__ = last_checkpoint
UpperCAmelCase__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase__ = trainer.evaluate()
trainer.log_metrics("""eval""" , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("""eval""" , SCREAMING_SNAKE_CASE__ )
# Write model card and (optionally) push to hub
UpperCAmelCase__ = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
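# Example invocation (flags below are illustrative, not prescriptive; the
# "superb"/"ks" keyword-spotting setup is a common choice for this script):
# python run_audio_classification.py \
#     --model_name_or_path facebook/wav2vec2-base \
#     --dataset_name superb --dataset_config_name ks \
#     --output_dir wav2vec2-base-ft-keyword-spotting \
#     --do_train --do_eval \
#     --learning_rate 3e-5 --max_length_seconds 1 \
#     --per_device_train_batch_size 32 --num_train_epochs 5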
| 346 |
'''simple docstring'''
import string
from math import log10
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = document.translate(
str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
UpperCAmelCase__ = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
UpperCAmelCase__ = corpus.lower().translate(
str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with ''
UpperCAmelCase__ = corpus_without_punctuation.split("""\n""" )
UpperCAmelCase__ = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False ):
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
        return round(1 + log10(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
    return round(log10(n / df ) , 3 )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
return round(tf * idf , 3 )
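# Worked example (self-contained, uses the same log10 imported above): a term
# appearing in 1 of 3 documents has idf = round(log10(3 / 1), 3) = 0.477, so
# with tf = 2 the tf-idf score is round(2 * 0.477, 3) = 0.954.
if __name__ == "__main__":
    _idf = round(log10(3 / 1), 3)
    print(round(2 * _idf, 3))  # 0.954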
| 346 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__A = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__A = typing.Union[np.floataa, int, float] # noqa: UP007
def __a ( lowerCAmelCase_ : Vector ,lowerCAmelCase_ : Vector ) -> VectorOut:
'''simple docstring'''
return np.sqrt(np.sum((np.asarray(lowerCAmelCase_ ) - np.asarray(lowerCAmelCase_ )) ** 2 ) )
def __a ( lowerCAmelCase_ : Vector ,lowerCAmelCase_ : Vector ) -> VectorOut:
'''simple docstring'''
return sum((va - va) ** 2 for va, va in zip(lowerCAmelCase_ ,lowerCAmelCase_ ) ) ** (1 / 2)
if __name__ == "__main__":
def __a ( ) -> None:
'''simple docstring'''
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" ,number=1_00_00 ,globals=globals() ,) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" ,number=1_00_00 ,globals=globals() ,) )
benchmark()
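    # Quick numeric check (classic 3-4-5 right triangle): the distance between
    # (0, 0) and (3, 4) must be exactly 5.
    assert np.isclose(np.sqrt(np.sum((np.asarray([0, 0]) - np.asarray([3, 4])) ** 2)), 5.0)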
| 354 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__A = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
__A = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
__A = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class lowercase ( datasets.Metric):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : List[List[List[str]]] , __UpperCAmelCase : List[List[str]] , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__UpperCAmelCase , hypotheses=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase )
}
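# Minimal direct use of NLTK's corpus_gleu (the scorer the metric above wraps),
# kept self-contained so it does not rely on the datasets wrapper:
if __name__ == "__main__":
    _hyp = "it is a guide to action".split()
    _ref = "it is the guide to action".split()
    # GLEU is min(n-gram precision, n-gram recall), aggregated over n = 1..4;
    # here 9 of the 18 hypothesis n-grams match the reference, giving 0.5.
    print(round(gleu_score.corpus_gleu([[_ref]], [_hyp]), 2))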
| 277 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowercase__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 96 |
"""simple docstring"""
def _snake_case ( lowercase__ ):
stooge(lowercase__ , 0 , len(lowercase__ ) - 1 )
return arr
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
if i >= h:
return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
_lowerCamelCase : Union[str, Any] = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowercase__ , i + t , (lowercase__) )
# Recursively sort first 2/3 elements
stooge(lowercase__ , lowercase__ , (h - t) )
if __name__ == "__main__":
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted)) | 96 | 1 |
def _UpperCamelCase ( input_str : str ) -> str:
    """
    Reverse the order of the words in a string.

    >>> _UpperCamelCase("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Union[str, Any] = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
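# Smoke-test sketch (assumes transformers is installed): the _LazyModule above
# defers the heavy framework imports until an attribute is actually accessed.
if __name__ == "__main__":
    from transformers import ConvBertConfig
    print(ConvBertConfig().model_type)  # "convbert"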
| 122 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : str=10 ):
'''simple docstring'''
lowerCamelCase = []
for _ in range(lowerCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : List[str]=10 ):
'''simple docstring'''
lowerCamelCase = []
for step in range(lowerCamelCase__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = os.path.join(lowerCamelCase__ , """schedule.bin""" )
torch.save(scheduler.state_dict() , lowerCamelCase__ )
lowerCamelCase = torch.load(lowerCamelCase__ )
scheduler.load_state_dict(lowerCamelCase__ )
return lrs
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self , A , A , A ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(len(A ) , len(A ) )
for a, b in zip(A , A ):
self.assertAlmostEqual(A , A , delta=A )
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=A )
lowerCamelCase = torch.tensor([0.4, 0.2, -0.5] )
lowerCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCamelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_00 ):
lowerCamelCase = criterion(A , A )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=A )
lowerCamelCase = torch.tensor([0.4, 0.2, -0.5] )
lowerCamelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCamelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=A , weight_decay=0.0 , relative_step=A , scale_parameter=A , warmup_init=A , )
for _ in range(10_00 ):
lowerCamelCase = criterion(A , A )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : List[str] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCamelCase : Optional[Any] = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCamelCase : str = 1_0
def __A ( self , A , A , A , A=None ) -> List[Any]:
'''simple docstring'''
self.assertEqual(len(A ) , len(A ) )
for a, b in zip(A , A ):
self.assertAlmostEqual(A , A , delta=A , msg=A )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
lowerCamelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
lowerCamelCase , lowerCamelCase = data
lowerCamelCase = scheduler_func(self.optimizer , **A )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
lowerCamelCase = unwrap_schedule(A , self.num_steps )
self.assertListAlmostEqual(
A , A , tol=1e-2 , msg=F'failed for {scheduler_func} in normal scheduler' , )
lowerCamelCase = scheduler_func(self.optimizer , **A )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(A ) # wrap to test picklability of the schedule
lowerCamelCase = unwrap_and_save_reload_schedule(A , self.num_steps )
self.assertListEqual(A , A , msg=F'failed for {scheduler_func} in save and reload' )
class __lowercase :
"""simple docstring"""
def __init__( self , A ) -> int:
'''simple docstring'''
lowerCamelCase = fn
def __call__( self , *A , **A ) -> Dict:
'''simple docstring'''
return self.fn(*A , **A )
@classmethod
def __A ( self , A ) -> Dict:
'''simple docstring'''
        scheduler.lr_lambdas = list(map(self , scheduler.lr_lambdas ) )
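if __name__ == "__main__" and is_torch_available():
    # Minimal usage sketch (not part of the test suite): warm up linearly to
    # lr=10.0 over 2 steps, then decay linearly to 0 over the remaining 8,
    # mirroring the expected values asserted above.
    _model = nn.Linear(2, 2)
    _optimizer = AdamW(_model.parameters(), lr=10.0)
    _scheduler = get_linear_schedule_with_warmup(_optimizer, num_warmup_steps=2, num_training_steps=10)
    for _ in range(10):
        _optimizer.step()
        _scheduler.step()
    print(_scheduler.get_last_lr())  # [0.0] once fully decayed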
| 252 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : Any = ["image_processor", "tokenizer"]
UpperCamelCase : Dict = "BridgeTowerImageProcessor"
UpperCamelCase : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , A , A ) -> Optional[int]:
'''simple docstring'''
super().__init__(A , A )
def __call__( self , A , A = None , A = True , A = False , A = None , A = None , A = 0 , A = None , A = None , A = None , A = False , A = False , A = False , A = False , A = True , A = None , **A , ) -> BatchEncoding:
'''simple docstring'''
lowerCamelCase = self.tokenizer(
text=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_token_type_ids=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_length=A , verbose=A , return_tensors=A , **A , )
# add pixel_values + pixel_mask
lowerCamelCase = self.image_processor(
A , return_tensors=A , do_normalize=A , do_center_crop=A , **A )
encoding.update(A )
return encoding
def __A ( self , *A , **A ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*A , **A )
def __A ( self , *A , **A ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*A , **A )
@property
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = self.tokenizer.model_input_names
lowerCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
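# Hypothetical end-to-end sketch (the real transformers class name and a public
# checkpoint are assumptions here; the first run downloads files from the Hub):
if __name__ == "__main__":
    from PIL import Image
    from transformers import BridgeTowerProcessor
    _processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
    _encoding = _processor(images=Image.new("RGB", (288, 288)), text="a photo of a cat", return_tensors="pt")
    print(sorted(_encoding.keys()))  # e.g. attention_mask, input_ids, pixel_mask, pixel_values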
| 252 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __A( __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """fnet"""
def __init__(self , SCREAMING_SNAKE_CASE_=3_20_00 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu_new" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_tpu_fourier_optimizations
UpperCamelCase__ = tpu_short_seq_length
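# Quick sketch (assumes transformers is installed): FNet replaces self-attention
# with Fourier mixing, which is why there is no num_attention_heads field above.
if __name__ == "__main__":
    from transformers import FNetConfig
    _config = FNetConfig(hidden_size=256, num_hidden_layers=4, intermediate_size=1024)
    print(_config.model_type)  # "fnet"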
| 178 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCamelCase_ = ['''text''', '''image''', '''audio''']
def __magic_name__ ( __a : List[str] ):
'''simple docstring'''
UpperCamelCase__ = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(__a , __a ):
inputs.append(create_inputs(__a ) )
else:
raise ValueError(f"Invalid type requested: {input_type}" )
return inputs
def __magic_name__ ( __a : List ):
'''simple docstring'''
UpperCamelCase__ = []
for output in outputs:
if isinstance(__a , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(__a , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(__a , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f"Invalid output: {output}" )
return output_types
@is_tool_test
class __A:
"""simple docstring"""
def UpperCAmelCase_ (self ):
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
UpperCamelCase__ = self.tool.inputs
for _input in inputs:
if isinstance(_input , SCREAMING_SNAKE_CASE_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
UpperCamelCase__ = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = create_inputs(self.tool.inputs )
UpperCamelCase__ = self.tool(*SCREAMING_SNAKE_CASE_ )
# There is a single output
if len(self.tool.outputs ) == 1:
UpperCamelCase__ = [outputs]
self.assertListEqual(output_types(SCREAMING_SNAKE_CASE_ ) , self.tool.outputs )
def UpperCAmelCase_ (self ):
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = create_inputs(self.tool.inputs )
UpperCamelCase__ = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(self.tool.outputs ) )
for output, output_type in zip(SCREAMING_SNAKE_CASE_ , self.tool.outputs ):
UpperCamelCase__ = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = create_inputs(self.tool.inputs )
UpperCamelCase__ = []
for _input, input_type in zip(SCREAMING_SNAKE_CASE_ , self.tool.inputs ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
UpperCamelCase__ = self.tool(*SCREAMING_SNAKE_CASE_ )
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = [outputs]
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(self.tool.outputs ) )
| 178 | 1 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self ) -> List[str]:
torch.manual_seed(0 )
__UpperCamelCase =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.dummy_uncond_unet
__UpperCamelCase =KarrasVeScheduler()
__UpperCamelCase =KarrasVePipeline(unet=A_ , scheduler=A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =pipe(num_inference_steps=2 , generator=A_ , output_type='numpy' ).images
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =pipe(num_inference_steps=2 , generator=A_ , output_type='numpy' , return_dict=A_ )[0]
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ='google/ncsnpp-celebahq-256'
__UpperCamelCase =UNetaDModel.from_pretrained(A_ )
__UpperCamelCase =KarrasVeScheduler()
__UpperCamelCase =KarrasVePipeline(unet=A_ , scheduler=A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =pipe(num_inference_steps=20 , generator=A_ , output_type='numpy' ).images
__UpperCamelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCamelCase =np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 62 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = RoCBertTokenizer
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Any = False
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[int] = filter_non_english
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
super().setUp()
__lowerCAmelCase: Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
__lowerCAmelCase: List[Any] = {}
__lowerCAmelCase: Dict = {}
for i, value in enumerate(UpperCamelCase__):
__lowerCAmelCase: List[Any] = i
__lowerCAmelCase: Union[str, Any] = i
__lowerCAmelCase: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
__lowerCAmelCase: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"])
__lowerCAmelCase: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
with open(self.word_shape_file , "w" , encoding="utf-8") as word_shape_writer:
json.dump(UpperCamelCase__ , UpperCamelCase__ , ensure_ascii=UpperCamelCase__)
with open(self.word_pronunciation_file , "w" , encoding="utf-8") as word_pronunciation_writer:
json.dump(UpperCamelCase__ , UpperCamelCase__ , ensure_ascii=UpperCamelCase__)
def lowercase_ ( self : Any)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
__lowerCAmelCase: Union[str, Any] = tokenizer.tokenize("你好[SEP]你是谁")
self.assertListEqual(UpperCamelCase__ , ["你", "好", "[SEP]", "你", "是", "谁"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCamelCase__) , [5, 6, 2, 5, 7, 8])
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase__) , [5, 6, 2, 5, 7, 8])
def lowercase_ ( self : Optional[Any])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: int = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"])
def lowercase_ ( self : str)-> Dict:
'''simple docstring'''
__lowerCAmelCase: int = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self : Optional[int])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"])
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: Tuple = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self : str)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: List[str] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
def lowercase_ ( self : Any)-> Any:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self : Optional[int])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self : Optional[Any])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , strip_accents=UpperCamelCase__)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"])
def lowercase_ ( self : Tuple)-> str:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=UpperCamelCase__ , never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def lowercase_ ( self : List[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: List[str] = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
__lowerCAmelCase: int = {}
for i, token in enumerate(UpperCamelCase__):
__lowerCAmelCase: Optional[Any] = i
__lowerCAmelCase: str = RoCBertWordpieceTokenizer(vocab=UpperCamelCase__ , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"])
def lowercase_ ( self : Optional[Any])-> Dict:
'''simple docstring'''
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def lowercase_ ( self : Dict)-> Optional[int]:
'''simple docstring'''
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def lowercase_ ( self : Union[str, Any])-> str:
'''simple docstring'''
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def lowercase_ ( self : Dict)-> int:
'''simple docstring'''
__lowerCAmelCase: Any = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCamelCase__) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
if self.test_rust_tokenizer:
__lowerCAmelCase: Any = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCamelCase__) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]])
def lowercase_ ( self : Dict)-> Any:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__lowerCAmelCase: str = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
__lowerCAmelCase: Tuple = tokenizer_r.encode_plus(
UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , )
__lowerCAmelCase: str = tokenizer_r.do_lower_case if hasattr(UpperCamelCase__ , "do_lower_case") else False
__lowerCAmelCase: List[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"])
def lowercase_ ( self : Union[str, Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = ["的", "人", "有"]
__lowerCAmelCase: int = "".join(UpperCamelCase__)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__lowerCAmelCase: Tuple = True
__lowerCAmelCase: str = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: Dict = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: List[Any] = tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: Any = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__)
__lowerCAmelCase: List[str] = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: int = False
__lowerCAmelCase: Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: str = tokenizer_r.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: str = tokenizer_p.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: str = tokenizer_r.convert_ids_to_tokens(UpperCamelCase__)
__lowerCAmelCase: Tuple = tokenizer_p.convert_ids_to_tokens(UpperCamelCase__)
# it is expected that only the first Chinese character is not preceded by "##".
__lowerCAmelCase: Dict = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCamelCase__)
]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
@slow
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
__lowerCAmelCase: str = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
__lowerCAmelCase: Dict = tokenizer.encode("你好" , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = tokenizer.encode("你是谁" , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: Tuple = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__)
__lowerCAmelCase: List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowercase_ ( self : Tuple)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: int = self.get_tokenizers(do_lower_case=UpperCamelCase__)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
__lowerCAmelCase: str = "你好,你是谁"
__lowerCAmelCase: Dict = tokenizer.tokenize(UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = tokenizer.convert_tokens_to_shape_ids(UpperCamelCase__)
__lowerCAmelCase: Tuple = tokenizer.convert_tokens_to_pronunciation_ids(UpperCamelCase__)
__lowerCAmelCase: Dict = tokenizer.prepare_for_model(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = tokenizer.encode_plus(UpperCamelCase__ , add_special_tokens=UpperCamelCase__)
self.assertEqual(UpperCamelCase__ , UpperCamelCase__)
| 217 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)

    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
            # fmt: on
        )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowerCAmelCase : Dict = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCAmelCase,  # the expected-encoding literal bound on the line above
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
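# Illustrative usage sketch (mirrors the integration tests above; needs network access):
#
#   tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
#   model_inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # for generation, force the target language as the first generated token:
#   forced_bos = tokenizer.lang_code_to_id["ro_RO"]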
| 25 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    """Configuration class to store the configuration of an MVP model."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
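# Illustrative usage sketch: `attribute_map` routes the canonical config names onto
# MVP's encoder-centric fields.
if __name__ == "__main__":
    config = MvpConfig(d_model=512, encoder_attention_heads=8)
    # `hidden_size` resolves to `d_model`, `num_attention_heads` to `encoder_attention_heads`
    print(config.hidden_size, config.num_attention_heads)  # -> 512 8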
| 25 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" FNet tokenizer (backed by HuggingFace's *tokenizers* library), based on a
    Unigram model.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
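# Illustrative usage sketch (downloads "google/fnet-base" from the Hub on first use):
if __name__ == "__main__":
    tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
    enc = tok("first segment", "second segment")
    # token_type_ids: 0 over `[CLS] first [SEP]`, 1 over `second [SEP]`,
    # matching create_token_type_ids_from_sequences above
    print(enc["token_type_ids"])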
| 229 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
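# Illustrative usage sketch: with the default type_vocab_size == 0, the ONNX inputs
# deliberately omit token_type_ids (see `inputs` above).
if __name__ == "__main__":
    onnx_config = DebertaV2OnnxConfig(DebertaV2Config())
    print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask']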
| 277 | 0 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name", default=None, type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir", default=str(Path(__file__).parent / "test_run" / "cache"), type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop", type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop", type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--attention_dropout", type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir", default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16", action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
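# Illustrative sketch (hypothetical subclass, not part of the original module): the only
# hard requirement for a concrete task module is overriding get_dataloader().
if __name__ == "__main__":
    import torch
    from torch.utils.data import DataLoader, TensorDataset

    class DummyModule(BaseTransformer):
        def get_dataloader(self, type_path, batch_size, shuffle=False):
            data = TensorDataset(torch.zeros(8, 4, dtype=torch.long))  # placeholder data
            return DataLoader(data, batch_size=batch_size, shuffle=shuffle)
| 239 |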
"""simple docstring"""
from torch import nn
class ClassificationHead(nn.Module):
    """Classification head for the transformer."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
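# Illustrative usage sketch: a single forward pass with random features.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=1024)
    features = torch.randn(2, 1024)  # (batch, embed_size)
    print(head(features).shape)  # torch.Size([2, 5])
| 239 | 1 |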
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly: static dims where known, dynamic otherwise."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    """Wrapper around tf.nn.softmax; the tiny additive constant works around an XLA bug."""
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # Simplified functional layernorm, duplicating torch.nn.functional.layer_norm for ported models
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Invert an attention mask (e.g., switches 0. and 1.) into an additive mask."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    # `tf.gather` (the basis of TF embedding layers) silently returns zeros for positive
    # out-of-bound indices on GPU; assert early instead of failing silently.
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Saves attributes (data) of the specified name into the HDF5 group, chunking if needed."""
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group, reassembling chunks."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
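# Illustrative usage sketch for the shape helpers above (eager mode):
if __name__ == "__main__":
    x = tf.zeros((2, 3, 4))
    print(shape_list(x))  # [2, 3, 4]
    print(flatten(x, start_dim=1).shape)  # (2, 12)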
| 295 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
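# Illustrative note: the expected_slice above pins the first three ImageNet logits of
# facebook/regnet-y-040 for the COCO cats fixture; values can be regenerated with:
#
#   outputs = model(**image_processor(images=prepare_img(), return_tensors="np"))
#   print(outputs.logits[0, :3])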
| 122 | 0 |
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Converts a string of roman numerals to an integer, e.g. "LXXXIX" -> 89."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    """Converts an integer to its minimal roman-numeral form, e.g. 89 -> "LXXXIX"."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """
    For each numeral in the input file, parse it, regenerate the minimal form, and
    return the total number of characters saved.
    """
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
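# Illustrative round-trip check (not part of the Project Euler harness):
if __name__ == "__main__":
    assert parse_roman_numerals("XIV") == 14
    assert generate_roman_numerals(14) == "XIV"
    # "IIII" is valid but not minimal; the minimal form saves two characters,
    # which is exactly the saving counted by solution()
    assert generate_roman_numerals(parse_roman_numerals("IIII")) == "IV"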
| 351 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Any= "A painting of a squirrel eating a burger "
lowercase__ : Optional[Any]= torch.manual_seed(0 )
lowercase__ : List[str]= pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
lowercase__ : Optional[Any]= VersatileDiffusionTextToImagePipeline.from_pretrained(snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : Any= generator.manual_seed(0 )
lowercase__ : Tuple= pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Tuple= VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
lowercase__ : List[str]= "A painting of a squirrel eating a burger "
lowercase__ : Union[str, Any]= torch.manual_seed(0 )
lowercase__ : Optional[Any]= pipe(
prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
lowercase__ : List[str]= image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : Optional[int]= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 150 | 0 |
import re
def __UpperCAmelCase ( a_):
return [char.split() for char in re.split(R'[^ a-z A-Z 0-9 \s]' , str_)]
def __UpperCAmelCase ( a_):
snake_case_ = split_input(str_)
return "".join(
[''.join([char.capitalize() for char in sub_str]) for sub_str in string_split])
def __UpperCAmelCase ( a_ , upper , separator):
try:
snake_case_ = split_input(a_)
if upper:
snake_case_ = ''.join(
[
separator.join([char.upper() for char in sub_str])
for sub_str in string_split
])
else:
snake_case_ = ''.join(
[
separator.join([char.lower() for char in sub_str])
for sub_str in string_split
])
return res_str
except IndexError:
return "not valid string"
def __UpperCAmelCase ( a_):
return to_simple_case(a_)
def __UpperCAmelCase ( a_):
try:
snake_case_ = to_simple_case(a_)
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def __UpperCAmelCase ( a_ , upper):
    return to_complex_case(a_ , upper , '_')
def __UpperCAmelCase ( a_ , upper):
    return to_complex_case(a_ , upper , '-')
if __name__ == "__main__":
__import__("doctest").testmod()
| 178 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def __UpperCAmelCase ( a_):
return (data["data"], data["target"])
def __UpperCAmelCase ( a_ , target , test_features):
    snake_case_ = XGBRegressor(verbosity=0 , random_state=42)
    xgb.fit(a_ , target)
    # Predict target for test data
    snake_case_ = xgb.predict(test_features)
snake_case_ = predictions.reshape(len(a_) , 1)
return predictions
def __UpperCAmelCase ( ):
snake_case_ = fetch_california_housing()
snake_case_ , snake_case_ = data_handling(a_)
snake_case_ , snake_case_ , snake_case_ , snake_case_ = train_test_split(
a_ , a_ , test_size=0.25 , random_state=1)
snake_case_ = xgboost(a_ , a_ , a_)
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(a_ , a_)}''')
print(f'''Mean Square Error : {mean_squared_error(a_ , a_)}''')
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 178 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Dict = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
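# _LazyModule defers the heavy torch import until an exported name is actually
# accessed. A minimal sketch of the idea (not the Hugging Face implementation):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Called only on attribute misses: import the owning submodule lazily.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")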
| 204 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __snake_case :
lowerCAmelCase_ = LEDConfig
lowerCAmelCase_ = {}
lowerCAmelCase_ = "gelu"
def __init__( self : Any , _lowercase : Tuple , _lowercase : str=13 , _lowercase : Optional[int]=7 , _lowercase : Optional[Any]=True , _lowercase : Dict=False , _lowercase : Union[str, Any]=99 , _lowercase : Any=32 , _lowercase : int=2 , _lowercase : List[str]=4 , _lowercase : Optional[int]=37 , _lowercase : Union[str, Any]=0.1 , _lowercase : str=0.1 , _lowercase : Union[str, Any]=20 , _lowercase : List[str]=2 , _lowercase : Optional[int]=1 , _lowercase : Dict=0 , _lowercase : List[str]=4 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = eos_token_id
SCREAMING_SNAKE_CASE__ = pad_token_id
SCREAMING_SNAKE_CASE__ = bos_token_id
SCREAMING_SNAKE_CASE__ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
SCREAMING_SNAKE_CASE__ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
SCREAMING_SNAKE_CASE__ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
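        # Example: with seq_length = 7 and attention_window = 4 the padding term
        # is (4 - 7 % 4) % 4 = 1, so encoder_seq_length becomes 8, the next
        # multiple of the attention window.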
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__ = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
SCREAMING_SNAKE_CASE__ = prepare_led_inputs_dict(_lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = tf.concat(
[tf.zeros_like(_lowercase )[:, :-1], tf.ones_like(_lowercase )[:, -1:]] , axis=-1 , )
SCREAMING_SNAKE_CASE__ = global_attention_mask
return config, inputs_dict
def __a ( self : Tuple , _lowercase : int , _lowercase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFLEDModel(config=_lowercase ).get_decoder()
SCREAMING_SNAKE_CASE__ = inputs_dict["""input_ids"""]
SCREAMING_SNAKE_CASE__ = input_ids[:1, :]
SCREAMING_SNAKE_CASE__ = inputs_dict["""attention_mask"""][:1, :]
SCREAMING_SNAKE_CASE__ = 1
# first forward pass
SCREAMING_SNAKE_CASE__ = model(_lowercase , attention_mask=_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        SCREAMING_SNAKE_CASE__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE__ = model(_lowercase , attention_mask=_lowercase )[0]
SCREAMING_SNAKE_CASE__ = model(_lowercase , attention_mask=_lowercase , past_key_values=_lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__ = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowercase , _lowercase , rtol=1E-3 )
def __SCREAMING_SNAKE_CASE ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
        SCREAMING_SNAKE_CASE__ = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __snake_case ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowerCAmelCase_ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase_ = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFLEDModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=_lowercase )
def __a ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowercase )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = tf.zeros_like(inputs_dict["""attention_mask"""] )
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.model_tester.seq_length
SCREAMING_SNAKE_CASE__ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ = outputs.decoder_attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ = [t.numpy() for t in outputs.encoder_attentions]
SCREAMING_SNAKE_CASE__ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model(self._prepare_for_class(_lowercase , _lowercase ) )
SCREAMING_SNAKE_CASE__ = len(_lowercase )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_decoder_attentions_output(_lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowercase ) )
self.assertEqual(model.config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def __a ( self : List[Any] ):
"""simple docstring"""
pass
def __a ( self : List[str] ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Dict:
"""simple docstring"""
    return tf.constant(__UpperCamelCase , dtype=tf.int32 )
__lowerCamelCase : List[str] = 1e-4
@slow
@require_tf
class __snake_case ( unittest.TestCase ):
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
SCREAMING_SNAKE_CASE__ = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
SCREAMING_SNAKE_CASE__ = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
SCREAMING_SNAKE_CASE__ = prepare_led_inputs_dict(model.config , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = model(**_lowercase )[0]
SCREAMING_SNAKE_CASE__ = (1, 10_24, 7_68)
self.assertEqual(output.shape , _lowercase )
# change to expected output here
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1E-3 )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
SCREAMING_SNAKE_CASE__ = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
SCREAMING_SNAKE_CASE__ = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
SCREAMING_SNAKE_CASE__ = prepare_led_inputs_dict(model.config , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ = model(**_lowercase )[0]
SCREAMING_SNAKE_CASE__ = (1, 10_24, model.config.vocab_size)
self.assertEqual(output.shape , _lowercase )
# change to expected output here
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1E-3 , rtol=1E-3 )
| 204 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
UpperCAmelCase__ : Optional[int] = TypeVar('KT')
UpperCAmelCase__ : Any = TypeVar('VT')
class lowerCAmelCase_ (Generic[KT, VT] ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ = "root" , SCREAMING_SNAKE_CASE__ = None ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = key
SCREAMING_SNAKE_CASE__ : Dict = value
SCREAMING_SNAKE_CASE__ : list[Node[KT, VT]] = []
def __repr__(self ) -> str:
"""simple docstring"""
return F'''Node({self.key}: {self.value})'''
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return len(self.forward )
class lowerCAmelCase_ (Generic[KT, VT] ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ = 0.5 , SCREAMING_SNAKE_CASE__ = 16 ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Node[KT, VT] = Node[KT, VT]()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE__ : Tuple = p
SCREAMING_SNAKE_CASE__ : List[Any] = max_level
def __str__(self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = list(self )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return F'''SkipList(level={self.level})'''
SCREAMING_SNAKE_CASE__ : Dict = max((len(str(SCREAMING_SNAKE_CASE__ ) ) for item in items) , default=4 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(SCREAMING_SNAKE_CASE__ , 4 ) + 4
SCREAMING_SNAKE_CASE__ : Tuple = self.head
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : List[Any] = node.forward.copy()
lines.append(F'''[{node.key}]'''.ljust(SCREAMING_SNAKE_CASE__ , """-""" ) + """* """ * len(SCREAMING_SNAKE_CASE__ ) )
lines.append(""" """ * label_size + """| """ * len(SCREAMING_SNAKE_CASE__ ) )
while len(node.forward ) != 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = node.forward[0]
lines.append(
F'''[{node.key}]'''.ljust(SCREAMING_SNAKE_CASE__ , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : List[Any] = node.forward
lines.append("""None""".ljust(SCREAMING_SNAKE_CASE__ ) + """* """ * len(SCREAMING_SNAKE_CASE__ ) )
return F'''SkipList(level={self.level})\n''' + "\n".join(SCREAMING_SNAKE_CASE__ )
def __iter__(self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
SCREAMING_SNAKE_CASE__ : Tuple = node.forward[0]
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = 1
while random() < self.p and level < self.max_level:
level += 1
return level
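        # With p = 0.5 this samples from a geometric distribution:
        # P(level >= k) = p ** (k - 1), so the expected level is 1 / (1 - p) = 2
        # and a list of n keys stays O(log n) tall with high probability.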
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : List[str] = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
SCREAMING_SNAKE_CASE__ : str = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(SCREAMING_SNAKE_CASE__ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self._locate_node(SCREAMING_SNAKE_CASE__ )
if node is not None:
for i, update_node in enumerate(SCREAMING_SNAKE_CASE__ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
SCREAMING_SNAKE_CASE__ : Dict = node.forward[i]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = update_node.forward[:i]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self._locate_node(SCREAMING_SNAKE_CASE__ )
if node is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
else:
SCREAMING_SNAKE_CASE__ : str = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , SCREAMING_SNAKE_CASE__ ):
update_vector.append(self.head )
SCREAMING_SNAKE_CASE__ : Optional[Any] = level
SCREAMING_SNAKE_CASE__ : int = Node(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Dict = new_node
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> VT | None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self._locate_node(SCREAMING_SNAKE_CASE__ )
if node is not None:
return node.value
return None
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : List[Any] = SkipList()
skip_list.insert("""Key1""" ,3 )
skip_list.insert("""Key2""" ,12 )
skip_list.insert("""Key3""" ,41 )
skip_list.insert("""Key4""" ,-19 )
SCREAMING_SNAKE_CASE__ : Optional[int] = skip_list.head
SCREAMING_SNAKE_CASE__ : Tuple = {}
while node.level != 0:
SCREAMING_SNAKE_CASE__ : List[Any] = node.forward[0]
SCREAMING_SNAKE_CASE__ : int = node.value
assert len(_snake_case ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = SkipList()
skip_list.insert("""Key1""" ,10 )
skip_list.insert("""Key1""" ,12 )
skip_list.insert("""Key5""" ,7 )
skip_list.insert("""Key7""" ,10 )
skip_list.insert("""Key10""" ,5 )
skip_list.insert("""Key7""" ,7 )
skip_list.insert("""Key5""" ,5 )
skip_list.insert("""Key10""" ,10 )
SCREAMING_SNAKE_CASE__ : List[str] = skip_list.head
SCREAMING_SNAKE_CASE__ : Dict = {}
while node.level != 0:
SCREAMING_SNAKE_CASE__ : List[str] = node.forward[0]
SCREAMING_SNAKE_CASE__ : Tuple = node.value
if len(_snake_case ) != 4:
print()
assert len(_snake_case ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = SkipList()
assert skip_list.find("""Some key""" ) is None
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = SkipList()
skip_list.insert("""Key2""" ,20 )
assert skip_list.find("""Key2""" ) == 20
skip_list.insert("""Some Key""" ,10 )
skip_list.insert("""Key2""" ,8 )
skip_list.insert("""V""" ,13 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 10
assert skip_list.find("""V""" ) == 13
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[int] = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Dict = SkipList()
skip_list.insert("""Key1""" ,12 )
skip_list.insert("""V""" ,13 )
skip_list.insert("""X""" ,14 )
skip_list.insert("""Key2""" ,15 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Any = SkipList()
skip_list.insert("""Key1""" ,12 )
skip_list.insert("""V""" ,13 )
skip_list.insert("""X""" ,14 )
skip_list.insert("""Key2""" ,15 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 14
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" ,12 )
skip_list.insert("""V""" ,13 )
skip_list.insert("""X""" ,142 )
skip_list.insert("""Key2""" ,15 )
skip_list.delete("""X""" )
def traverse_keys(_snake_case ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(_snake_case )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def lowercase_ ( ):
def is_sorted(_snake_case ):
return all(next_item >= item for item, next_item in zip(_snake_case ,lst[1:] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = SkipList()
for i in range(10 ):
skip_list.insert(_snake_case ,_snake_case )
assert is_sorted(list(_snake_case ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(_snake_case ) )
skip_list.insert(-12 ,-12 )
skip_list.insert(77 ,77 )
assert is_sorted(list(_snake_case ) )
def lowercase_ ( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : List[str] = SkipList()
skip_list.insert(2 ,"""2""" )
skip_list.insert(4 ,"""4""" )
skip_list.insert(6 ,"""4""" )
skip_list.insert(4 ,"""5""" )
skip_list.insert(8 ,"""4""" )
skip_list.insert(9 ,"""4""" )
skip_list.delete(4 )
print(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 25 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
    if _snake_case < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(_snake_case , float ):
raise TypeError("""Input value must be a 'int' type""" )
return bin(_snake_case ).count("""1""" )
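# Example: lowercase_(25) == 3, since bin(25) == "0b11001" contains three 1s.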
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__a = logging.getLogger()
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("-f" )
_UpperCAmelCase : Optional[int] = parser.parse_args()
return args.f
class A__ ( UpperCamelCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
_UpperCAmelCase : Dict = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(lowerCAmelCase__ , 0.666 )
@slow
@require_torch_non_multi_gpu
def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : int = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowerCAmelCase__ )
_UpperCAmelCase : str = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(lowerCAmelCase__ )
| 17 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__a = datasets.utils.logging.get_logger(__name__)
__a = ['names', 'prefix']
__a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
__a = ['encoding_errors', 'on_bad_lines']
__a = ['date_format']
@dataclass
class A__ ( datasets.BuilderConfig ):
"""simple docstring"""
UpperCamelCase_ : str = ","
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[Union[int, List[int], str]] = "infer"
UpperCamelCase_ : Optional[List[str]] = None
UpperCamelCase_ : Optional[List[str]] = None
UpperCamelCase_ : Optional[Union[int, str, List[int], List[str]]] = None
UpperCamelCase_ : Optional[Union[List[int], List[str]]] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : Optional[Literal["c", "python", "pyarrow"]] = None
UpperCamelCase_ : Dict[Union[int, str], Callable[[Any], Any]] = None
UpperCamelCase_ : Optional[list] = None
UpperCamelCase_ : Optional[list] = None
UpperCamelCase_ : bool = False
UpperCamelCase_ : Optional[Union[int, List[int]]] = None
UpperCamelCase_ : Optional[int] = None
UpperCamelCase_ : Optional[Union[str, List[str]]] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = False
UpperCamelCase_ : bool = True
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : str = "."
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : str = '"'
UpperCamelCase_ : int = 0
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = True
UpperCamelCase_ : int = 0
UpperCamelCase_ : bool = True
UpperCamelCase_ : bool = False
UpperCamelCase_ : Optional[str] = None
UpperCamelCase_ : int = 1_00_00
UpperCamelCase_ : Optional[datasets.Features] = None
UpperCamelCase_ : Optional[str] = "strict"
UpperCamelCase_ : Literal["error", "warn", "skip"] = "error"
UpperCamelCase_ : Optional[str] = None
def _lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
if self.delimiter is not None:
_UpperCAmelCase : Any = self.delimiter
if self.column_names is not None:
_UpperCAmelCase : List[Any] = self.column_names
@property
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Dict = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
UpperCamelCase_ : int = CsvConfig
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : str ) -> List[str]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_UpperCAmelCase : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
_UpperCAmelCase : int = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Any = [files]
_UpperCAmelCase : List[Any] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_UpperCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : str = [files]
_UpperCAmelCase : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
_UpperCAmelCase : Tuple = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
_UpperCAmelCase : Any = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_UpperCAmelCase : int = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Dict ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : int = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_UpperCAmelCase : Optional[Any] = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
_UpperCAmelCase : Optional[Any] = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" )
                raise
| 17 | 1 |
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : str , max_width : int ) -> list:
lowercase_ : Optional[Any] = word.split()
    def justify(line : list , width : int , max_width : int ) -> str:
        lowercase_ : Union[str, Any] = max_width - width
        lowercase_ : Optional[Any] = len(line )
        if len(line ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowercase_ : List[str] = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowercase_ : Dict = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowercase_ : Optional[int] = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(UpperCAmelCase__ ):
num_spaces_between_words_list[i] += 1
lowercase_ : Dict = []
for i in range(UpperCAmelCase__ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCAmelCase__ )
lowercase_ : List[str] = []
lowercase_ : list[str] = []
lowercase_ : Any = 0
for word in words:
if width + len(UpperCAmelCase__ ) + len(UpperCAmelCase__ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCAmelCase__ )
width += len(UpperCAmelCase__ )
else:
# justify the line and add it to result
answer.append(justify(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) )
# reset new line and new width
lowercase_ , lowercase_ : Any = [word], len(UpperCAmelCase__ )
lowercase_ : Any = max_width - width - len(UpperCAmelCase__ )
answer.append(""" """.join(UpperCAmelCase__ ) + (remaining_spaces + 1) * """ """ )
return answer
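# Expected behavior (the doctest of the original text_justification this was
# derived from):
#   lowerCamelCase("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']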
if __name__ == "__main__":
from doctest import testmod
testmod()
| 239 |
'''simple docstring'''
import os
def lowerCamelCase ( UpperCAmelCase__ : str = "input.txt" ) -> int:
with open(os.path.join(os.path.dirname(UpperCAmelCase__ ) , UpperCAmelCase__ ) ) as input_file:
lowercase_ : str = [
[int(UpperCAmelCase__ ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
lowercase_ : Optional[Any] = len(UpperCAmelCase__ )
lowercase_ : Any = len(matrix[0] )
lowercase_ : Union[str, Any] = [[-1 for _ in range(UpperCAmelCase__ )] for _ in range(UpperCAmelCase__ )]
for i in range(UpperCAmelCase__ ):
lowercase_ : int = matrix[i][0]
for j in range(1 , UpperCAmelCase__ ):
for i in range(UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , UpperCAmelCase__ ):
lowercase_ : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase_ : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
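# Worked example: for matrix = [[1, 2], [3, 4]] the sweeps settle on
# minimal_path_sums = [[1, 3], [3, 7]], so the cheapest left-to-right path
# costs 3 (start on 1, step right onto 2).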
if __name__ == "__main__":
print(f"""{solution() = }""")
| 239 | 1 |
"""simple docstring"""
def lowerCamelCase__ ( a ) -> int:
    memo: list[list[int]] = [[0 for _ in range(a )] for _ in range(a + 1 )]
    for i in range(a + 1 ):
        memo[i][0] = 1
    for n in range(a + 1 ):
        for k in range(1 , a ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[a][a - 1]
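# Worked example: the function above computes p(n); for n = 5 the partitions
# are 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1, so it returns 7.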
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase__ : Any = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
UpperCAmelCase__ : Any = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 363 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
UpperCAmelCase__ : str = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
UpperCAmelCase__ : Dict = {
'ctrl': 256,
}
UpperCAmelCase__ : Any = {
'Pregnancy': 168629,
'Christianity': 7675,
'Explain': 106423,
'Fitness': 63440,
'Saving': 63163,
'Ask': 27171,
'Ass': 95985,
'Joke': 163509,
'Questions': 45622,
'Thoughts': 49605,
'Retail': 52342,
'Feminism': 164338,
'Writing': 11992,
'Atheism': 192263,
'Netflix': 48616,
'Computing': 39639,
'Opinion': 43213,
'Alone': 44967,
'Funny': 58917,
'Gaming': 40358,
'Human': 4088,
'India': 1331,
'Joker': 77138,
'Diet': 36206,
'Legal': 11859,
'Norman': 4939,
'Tip': 72689,
'Weight': 52343,
'Movies': 46273,
'Running': 23425,
'Science': 2090,
'Horror': 37793,
'Confession': 60572,
'Finance': 12250,
'Politics': 16360,
'Scary': 191985,
'Support': 12654,
'Technologies': 32516,
'Teenage': 66160,
'Event': 32769,
'Learned': 67460,
'Notion': 182770,
'Wikipedia': 37583,
'Books': 6665,
'Extract': 76050,
'Confessions': 102701,
'Conspiracy': 75932,
'Links': 63674,
'Narcissus': 150425,
'Relationship': 54766,
'Relationships': 134796,
'Reviews': 41671,
'News': 4256,
'Translation': 26820,
'multilingual': 128406,
}
def lowerCamelCase__ ( a ) -> Optional[Any]:
_A: Optional[int] = set()
_A: Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A: Any = char
_A: Dict = set(a )
return pairs
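# Example: applied to the symbol sequence "banana", the pair extractor above
# (used as get_pairs by the BPE loop below) returns
# {("b", "a"), ("a", "n"), ("n", "a")}; repeated adjacent pairs collapse
# because the result is a set.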
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Any = VOCAB_FILES_NAMES
__UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Optional[int] = CONTROL_CODES
def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any]="<unk>" , **lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as vocab_handle:
_A: str = json.load(lowerCAmelCase_ )
_A: List[Any] = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as merges_handle:
_A: int = merges_handle.read().split('''\n''' )[1:-1]
_A: List[Any] = [tuple(merge.split() ) for merge in merges]
_A: List[str] = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A: Union[str, Any] = {}
@property
def __magic_name__ ( self : Any ):
"""simple docstring"""
return len(self.encoder )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_A: List[Any] = tuple(lowerCAmelCase_ )
_A: Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
_A: Optional[int] = get_pairs(lowerCAmelCase_ )
if not pairs:
return token
while True:
_A: Optional[int] = min(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : self.bpe_ranks.get(lowerCAmelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A: Any = bigram
_A: int = []
_A: int = 0
while i < len(lowerCAmelCase_ ):
try:
_A: Any = word.index(lowerCAmelCase_ , lowerCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A: Optional[int] = j
if word[i] == first and i < len(lowerCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A: Dict = tuple(lowerCAmelCase_ )
_A: Union[str, Any] = new_word
if len(lowerCAmelCase_ ) == 1:
break
else:
_A: Tuple = get_pairs(lowerCAmelCase_ )
_A: Optional[int] = '''@@ '''.join(lowerCAmelCase_ )
_A: List[str] = word[:-4]
_A: Optional[Any] = word
return word
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
_A: List[Any] = []
_A: List[str] = re.findall(R'''\S+\n?''' , lowerCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase_ ).split(''' ''' ) ) )
return split_tokens
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) )
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase_ , self.unk_token )
def __magic_name__ ( self : Any , lowerCAmelCase_ : Tuple ):
"""simple docstring"""
_A: Any = ''' '''.join(lowerCAmelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_A: List[str] = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_A: List[Any] = os.path.join(
lowerCAmelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase_ , ensure_ascii=lowerCAmelCase_ ) + '''\n''' )
_A: str = 0
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
_A: Tuple = token_index
writer.write(''' '''.join(lowerCAmelCase_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 301 | 0 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCamelCase : Tuple = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = test_results.split(""" """ )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
SCREAMING_SNAKE_CASE__ = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(_UpperCamelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
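# Example: a pytest summary like "=== 1 failed, 2 passed in 3.36s ===" splits
# into tokens where each count precedes its "failed"/"passed" keyword, yielding
# failed=1, success=2 and time_spent="3.36s".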
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = False
for line in failures_short_lines.split("""\n""" ):
if re.search(r"""_ \[doctest\]""" , _UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
SCREAMING_SNAKE_CASE__ = line
SCREAMING_SNAKE_CASE__ = False
return failures
class __snake_case :
def __init__( self : int , _lowercase : List[str] , _lowercase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = title
SCREAMING_SNAKE_CASE__ = doc_test_results["""time_spent"""].split(""",""" )[0]
SCREAMING_SNAKE_CASE__ = doc_test_results["""success"""]
SCREAMING_SNAKE_CASE__ = doc_test_results["""failures"""]
SCREAMING_SNAKE_CASE__ = self.n_success + self.n_failures
# Failures and success of the modeling tests
SCREAMING_SNAKE_CASE__ = doc_test_results
@property
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self._time_spent]
SCREAMING_SNAKE_CASE__ = 0
for time in time_spent:
SCREAMING_SNAKE_CASE__ = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_lowercase ) == 1:
SCREAMING_SNAKE_CASE__ = [0, 0, time_parts[0]]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f"""{int(_lowercase )}h{int(_lowercase )}m{int(_lowercase )}s"""
@property
def __a ( self : Dict ):
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def __a ( self : Union[str, Any] ):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __a ( self : str ):
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 40
SCREAMING_SNAKE_CASE__ = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(_lowercase , _lowercase )}
SCREAMING_SNAKE_CASE__ = """"""
for category, failures in category_failures.items():
if len(_lowercase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_lowercase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_lowercase )
    @staticmethod
    def error_out():
        """Post a generic failure message when the test run itself crashed."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload)
    def post(self):
        """Post the main report to Slack and keep the thread timestamp for replies."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text)
    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the Slack blocks for a per-job reply, truncating long error messages."""
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        """Post one threaded reply per job with its failure details."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"])
                time.sleep(1)
def get_job_links():
    """Fetch a mapping from GitHub Actions job names to their HTML URLs."""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def retrieve_artifact(name):
    """Read every decodable file in the artifact directory into a dict keyed by file stem."""
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    # key each report by its filename stem (e.g. "stats" for "stats.txt")
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    """Scan the working directory and wrap every artifact subdirectory in an Artifact."""
    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []
        def __str__(self):
            return self.name
        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})
    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "
        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 219 | """simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    """Parse a pytest summary string into (failed, success, time_spent)."""
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
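# Illustrative only (the exact summary string comes from pytest, not this file):
# handle_test_results("= 3 failed, 97 passed in 0:02:15 =") returns
# (3, 97, "0:02:15"); the "=" padding only appears when pytest's line is short.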
def extract_first_line_failure(failures_short_lines):
    """Map each failing test to the first line of its error message."""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
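# Assumed `failures_short` layout (pytest short-summary style): a header line
# like "____ [doctest] transformers.models.foo ____", whose third
# space-separated token is the test name, followed by error lines; the first
# following line that does not start with a digit is kept as the message.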
class Message:
    def __init__(self, title, doc_test_results):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time(self):
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
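    # Example: with self._time_spent == "0:02:15" this property yields "0h2m15s";
    # a bare "45.30" (a run under a minute) is first padded to [0, 0, "45.30"].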
    @property
    def header(self):
        """Slack header block carrying the report title."""
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures(self):
        """Slack section shown when every test passed."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def failures(self):
        """Slack section summarizing the failure count."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def category_failures(self):
        """Slack section listing failed examples grouped by category."""
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def payload(self):
        """JSON payload assembling the header plus the failure or no-failure sections."""
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
    @staticmethod
    def error_out():
        """Post a generic failure message when the test run itself crashed."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload)
    def post(self):
        """Post the main report to Slack and keep the thread timestamp for replies."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text)
    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the Slack blocks for a per-job reply, truncating long error messages."""
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        """Post one threaded reply per job with its failure details."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"])
                time.sleep(1)
def get_job_links():
    """Fetch a mapping from GitHub Actions job names to their HTML URLs."""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
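# Shape of the result (illustrative values, not from a real run):
#   {"run_doctests": "https://github.com/huggingface/transformers/actions/runs/<id>/jobs/<job_id>"}
# The extra page loop exists because the GitHub API caps per_page at 100.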
def retrieve_artifact(name):
    """Read every decodable file in the artifact directory into a dict keyed by file stem."""
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    # key each report by its filename stem (e.g. "stats" for "stats.txt")
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
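# Assumed artifact layout: a directory of small text report files (for example
# "stats", "failures_short", "summary_short") written by the CI job; each file
# body ends up under its filename stem, so artifact["stats"] holds that file.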
def retrieve_available_artifacts():
    """Scan the working directory and wrap every artifact subdirectory in an Artifact."""
    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []
        def __str__(self):
            return self.name
        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})
    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "
        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 150 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
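# Note: `loss` is the mean cross-entropy per target token, so multiplying by
# labels.shape[-1] and negating recovers the summed log-likelihood of the
# target sequence. The expected score is presumably taken from the original
# Mesh TensorFlow (mtf) T5 implementation used as the reference.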
| 356 | """simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 85 | 0 |
import copy
import os
from typing import List, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"
    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
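# Minimal usage sketch (assumes the class names restored above; the values
# produced are the library defaults, not tuned settings):
# config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())
# assert config.to_dict()["model_type"] == "align"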
| 204 |
def cocktail_shaker_sort(unsorted: list):
    """
    Sort a list in ascending order with bidirectional bubble passes.
    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
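# Like bubble sort this is O(n^2) in the worst case, but the alternating
# forward/backward passes plus the `swapped` flag let it finish early on
# nearly-sorted input such as [1, 2, 3, 5, 4].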
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
| 204 | 1 |
def largest_square_area_in_matrix_top_down(rows, cols, mat):
    def update_area_of_max_square(row, col):
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
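# Example: for mat = [[1, 1], [1, 1]] the call above returns 2, the side
# length of the largest all-ones square (despite "area" in the name).
# Without memoization this recursion is exponential; the three variants
# below bring the cost down to O(rows * cols).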
def largest_square_area_in_matrix_top_down_with_memoization(rows, cols, mat):
    def update_area_of_max_square_using_dp_array(row, col, dp_array):
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows, cols, mat):
    """
    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows, cols, mat):
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy (not alias) the finished row so the next iteration reads the true previous row
        next_row = current_row[:]
    return largest_square_area
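# Complexity summary: both bottom-up variants run in O(rows * cols) time; this
# last one keeps only two rows of the DP table, i.e. O(cols) extra space. The
# row copy above matters: aliasing current_row and next_row would corrupt the
# "previous row" reads in the next iteration.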
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 356 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n    >>> pipe_prior.to(\"cuda\")\n\n    >>> prompt = \"red cat, 4k photo\"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> negative_image_emb = out.negative_image_embeds\n\n    >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n    >>> pipe.to(\"cuda\")\n\n    >>> image = pipe(\n    ...     prompt,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=negative_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ... ).images\n\n    >>> image[0].save(\"cat.png\")\n    ```\n"
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
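# Rounds each dimension up to a multiple of scale_factor**2 before mapping to
# latent space. For example get_new_h_w(520, 512, 8) returns (72, 64): 520 is
# padded to 576 = 9 * 64 image pixels, i.e. 72 latent pixels at downscale 8.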
class KandinskyPipeline(DiffusionPipeline):
    """Text-to-image generation pipeline for Kandinsky 2.1."""
    def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", truncation=True, max_length=77, return_attention_mask=True, add_special_tokens=True, return_tensors="pt")
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask)
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt")
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1)
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, prompt: Union[str, List[str]], image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_prompt: Optional[Union[str, List[str]]] = None, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt)
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler)
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False)[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 134 | 0 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args)
        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args)
        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args)
| 17 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"])
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 17 | 1 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """
    Compute the Gamma function by numerical integration.
    >>> round(gamma(5), 6)
    24.0
    """
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
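# Gamma(z) = integral from 0 to inf of x**(z - 1) * exp(-x) dx, so for a
# positive integer n, Gamma(n) == (n - 1)!; the doctest above checks
# gamma(5) == 4! == 24.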
if __name__ == "__main__":
from doctest import testmod
testmod()
| 354 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Names and dynamic axes of the ONNX export inputs for the given task."""
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5
    @property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(self, processor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, num_channels=3, image_width=40, image_height=40) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework))
        return inputs
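# Hypothetical usage sketch (the processor checkpoint is illustrative; names
# outside this file are assumptions):
# from transformers import LayoutLMv3Processor
# onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
# dummy_inputs = onnx_config.generate_dummy_inputs(
#     LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base"), framework="pt")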
| 248 | 0 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]
    def __eq__(self, other):
        return self[-1] == other[-1]
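# Ordering stacks by their last element (with @total_ordering filling in the
# remaining comparisons) is what lets bisect_left below find, in O(log k),
# the leftmost pile whose top is >= the incoming element, which is the
# classic patience-sorting placement rule.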
def patience_sort(collection: list) -> list:
    """
    >>> patience_sort([1, 9, 5, 21, 17, 6])
    [1, 5, 6, 9, 17, 21]
    """
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
| 67 |
"""simple docstring"""
from math import isqrt, log10


def calculate_prime_numbers(max_number: int) -> list:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log10(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log10(prime_numbers[left])
            + prime_numbers[left] * log10(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F"{solution() = }")
| 301 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
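# Downloads up to `num_class_images` real "class" images (plus their captions and
# URLs) from the LAION-400M index via clip-retrieval, e.g. as regularization images
# for DreamBooth-style prior preservation.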
def retrieve(class_prompt, class_data_dir, num_class_images):
    """simple docstring"""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(F'{class_data_dir}/images', exist_ok=True)
    if len(list(Path(F'{class_data_dir}/images').iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)
    with open(F'{class_data_dir}/caption.txt', 'w') as f1, open(F'{class_data_dir}/urls.txt', 'w') as f2, open(
        F'{class_data_dir}/images.txt', 'w') as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(F'{class_data_dir}/images/{total}.jpg', 'wb') as f:
                        f.write(img.content)
                    f1.write(images['caption'] + '\n')
                    f2.write(images['url'] + '\n')
                    f3.write(F'{class_data_dir}/images/{total}.jpg' + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 308 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        '''simple docstring'''
        self.tool = load_tool('text-to-speech')
        self.tool.setup()
    def test_exact_match_arg(self):
        '''simple docstring'''
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485]), ))
    def test_exact_match_kwarg(self):
        '''simple docstring'''
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool('hey')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3], torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485]), ))
| 308 | 1 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
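# Maps a CompVis latent-diffusion UNet state dict (time_embed / input_blocks /
# middle_block / output_blocks naming) onto the diffusers UNet2DModel layout
# (time_embedding / down_blocks / mid_block / up_blocks), splitting fused qkv
# attention weights into separate query/key/value tensors along the way.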
def shave_segments(path, n_shave_prefix_segments=1):
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f'''input_blocks.{layer_id}''' in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'''middle_block.{layer_id}''' in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f'''output_blocks.{layer_id}''' in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)
        resnets = [key for key in input_blocks[i] if f'''input_blocks.{i}.0''' in key]
        attentions = [key for key in input_blocks[i] if f'''input_blocks.{i}.1''' in key]
        if f'''input_blocks.{i}.0.op.weight''' in checkpoint:
            new_checkpoint[f'''down_blocks.{block_id}.downsamplers.0.conv.weight'''] = checkpoint[
                f'''input_blocks.{i}.0.op.weight'''
            ]
            new_checkpoint[f'''down_blocks.{block_id}.downsamplers.0.conv.bias'''] = checkpoint[
                f'''input_blocks.{i}.0.op.bias'''
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f'''input_blocks.{i}.0''', "new": f'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config)
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f'''input_blocks.{i}.1''',
                "new": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
            }
            to_split = {
                f'''input_blocks.{i}.1.qkv.bias''': {
                    "key": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
                    "query": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
                    "value": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
                },
                f'''input_blocks.{i}.1.qkv.weight''': {
                    "key": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
                    "query": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
                    "value": f'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config, )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config)
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f'''output_blocks.{i}.0''' in key]
            attentions = [key for key in output_blocks[i] if f'''output_blocks.{i}.1''' in key]
            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)
            meta_path = {"old": f'''output_blocks.{i}.0''', "new": f'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f'''up_blocks.{block_id}.upsamplers.0.conv.weight'''] = checkpoint[
                    f'''output_blocks.{i}.{index}.conv.weight'''
                ]
                new_checkpoint[f'''up_blocks.{block_id}.upsamplers.0.conv.bias'''] = checkpoint[
                    f'''output_blocks.{i}.{index}.conv.bias'''
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f'''output_blocks.{i}.1''',
                    "new": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
                }
                to_split = {
                    f'''output_blocks.{i}.1.qkv.bias''': {
                        "key": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
                        "query": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
                        "value": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
                    },
                    f'''output_blocks.{i}.1.qkv.weight''': {
                        "key": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
                        "query": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
                        "value": f'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None, config=config, )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 48 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request("GET" , "https://huggingface.co" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("GET" , "https://huggingface.co" , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("GET" , "https://huggingface.co" )
def test_offline_with_datasets_offline_mode_enabled():
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError):
http_head("https://huggingface.co" )
| 57 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
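# Helper loaders for VQGAN / latent-diffusion checkpoints: a YAML config is read with
# OmegaConf, the class named under the config's `target` key is imported dynamically,
# and the state dict from the .pt/.ckpt file is loaded into the instantiated model.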
def load_config(config_path, display=False):
    """simple docstring"""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """simple docstring"""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    """simple docstring"""
    z, _, [_, _, indices] = model.encode(x)
    print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """simple docstring"""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """simple docstring"""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """simple docstring"""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """simple docstring"""
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(F"""loaded model from global step {global_step}.""")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 57 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 81 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
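# Standard transformers test layout: a ModelTester builds tiny configs and inputs,
# a ModelTesterMixin-based test class drives one check per task head, and a slow
# integration test pins expected logits from the released checkpoint.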
class SqueezeBertModelTester(object):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': SqueezeBertModel,
            'fill-mask': SqueezeBertForMaskedLM,
            'question-answering': SqueezeBertForQuestionAnswering,
            'text-classification': SqueezeBertForSequenceClassification,
            'token-classification': SqueezeBertForTokenClassification,
            'zero-shot': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_squeezebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_classification_head( self ):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
        input_ids = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1e-4 ) )
| 134 | 0 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    '''simple docstring'''
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
| 357 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    def get_tokenizer( self , mname ):
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self , mname ):
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences , return_tensors="pt" , truncation=True , padding="longest" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["bleu"] , min_bleu_score )
| 202 | 0 |
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo = 0, hi = -1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right(sorted_collection, item, lo = 0, hi = -1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left(sorted_collection, item, lo = 0, hi = -1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)
def insort_right(sorted_collection, item, lo = 0, hi = -1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f'{target} was not found in {collection}.')
    else:
        print(f'{target} was found at position {result} in {collection}.')
| 86 |
# Algorithm for the pigeonhole sorting
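# Runs in O(n + range) time and O(range) extra space, so it suits dense integer
# inputs whose spread (max - min) is small relative to n.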
def pigeonhole_sort(a):
    '''simple docstring'''
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main():
    '''simple docstring'''
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))
if __name__ == "__main__":
main()
| 248 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 305 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 305 | 1 |
import string
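# Brute-force Caesar decryption: shift the ciphertext by every key 0..25 and print
# each candidate so a human can spot the readable plaintext.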
def decrypt(message: str) -> None:
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"""Decryption using Key #{key}: {translated}""" )
def main() -> None:
    '''simple docstring'''
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 308 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
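# Text2TextGenerationPipeline drives encoder-decoder generation end to end
# (tokenize -> generate -> decode); SummarizationPipeline and TranslationPipeline
# below only override input validation and tokenization.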
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    return_name = '''generated'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self , return_tensors=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs , ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params['''return_type'''] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['''clean_up_tokenization_spaces'''] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            generate_kwargs['''eos_token_id'''] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs( self , input_length : int , min_length : int , max_length : int ):
        return True
    def _parse_and_tokenize( self , *args , truncation ):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ''''''
        if isinstance(args[0] , list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] , str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
        inputs = self.tokenizer(*args , padding=padding , truncation=truncation , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self , *args , **kwargs ):
        result = super().__call__(*args , **kwargs )
        if (
            isinstance(args[0] , list )
            and all(isinstance(el , str ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def preprocess( self , inputs , truncation=TruncationStrategy.DO_NOT_TRUNCATE , **kwargs ):
        inputs = self._parse_and_tokenize(inputs , truncation=truncation , **kwargs )
        return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        if self.framework == "pt":
            in_b, input_length = model_inputs['''input_ids'''].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs['''input_ids'''] ).numpy()
        generate_kwargs['''min_length'''] = generate_kwargs.get('''min_length''' , self.model.config.min_length )
        generate_kwargs['''max_length'''] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
        self.check_inputs(input_length , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def postprocess( self , model_outputs , return_type=ReturnType.TEXT , clean_up_tokenization_spaces=False ):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"""{self.return_name}_text""": self.tokenizer.decode(
                        output_ids , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                }
            records.append(record )
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline(Text2TextGenerationPipeline ):
    return_name = '''summary'''
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
    def check_inputs( self , input_length : int , min_length : int , max_length : int ) -> bool:
        if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
        if input_length < max_length:
            logger.warning(
                f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
                '''a summarization task, where outputs shorter than the input are typically wanted, you might '''
                f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline(Text2TextGenerationPipeline ):
    return_name = '''translation'''
    def check_inputs( self , input_length : int , min_length : int , max_length : int ):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
                '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
        return True
    def _parse_and_tokenize( self , *args , truncation=TruncationStrategy.DO_NOT_TRUNCATE , src_lang=None , tgt_lang=None ):
        if getattr(self.tokenizer , '''_build_translation_inputs''' , None ):
            return self.tokenizer._build_translation_inputs(
                *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang )
        else:
            return super()._parse_and_tokenize(*args , truncation=truncation )
    def _sanitize_parameters( self , src_lang=None , tgt_lang=None , **kwargs ):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs )
        if src_lang is not None:
            preprocess_params['''src_lang'''] = src_lang
        if tgt_lang is not None:
            preprocess_params['''tgt_lang'''] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get('''task''' , self.task )
            items = task.split('''_''' )
            if task and len(items ) == 4:
                # translation, XX, to YY
                preprocess_params['''src_lang'''] = items[1]
                preprocess_params['''tgt_lang'''] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
| 308 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
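# Each (parlai_name, hf_name) pair above rewrites one ParlAI weight-name fragment into
# its Hugging Face Blenderbot equivalent before the state dict is loaded.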
def rename_state_dict_key(k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("encoder" ):
        k = k.replace(".attn" , ".self_attn" )
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "final_layer_norm" )
    elif k.startswith("decoder" ):
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "encoder_attn_layer_norm" )
        k = k.replace("norm3" , "final_layer_norm" )
    return k
def rename_layernorm_keys(sd ):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("layernorm_embedding" , "layer_norm" )
        assert new_k not in sd
        sd[new_k] = v
__lowerCamelCase = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location="cpu" )
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 361 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__lowerCamelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["""DPTFeatureExtractor"""]
__lowerCamelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 | 0 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
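# Converts the original Salesforce BLIP captioning and VQA checkpoints to the Hugging
# Face Blip* classes: keys are renamed with regex rules, weights are loaded, and
# generate() outputs are checked against known token ids on a demo image.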
def load_demo_image(image_size , device ):
    '''simple docstring'''
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("RGB" )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
def rename_key(key):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    '''simple docstring'''
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)
    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)
    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)
    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35
    ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
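    # Example invocation (output path illustrative):
    #   python convert_blip_checkpoint.py --pytorch_dump_folder_path ./blip-hf --config_path blip-3b-config.json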
| 57 |
"""simple docstring"""
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
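# Minimal sketch of how a pinned-dependency table like this is typically consumed
# when assembling install_requires/extras (the helper name is illustrative, not from
# the original module):
def deps_list(*pkgs):
    """Return the pinned requirement strings for the given package names."""
    return [deps[pkg] for pkg in pkgs]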
| 57 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'yjernite/retribert-base-uncased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
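# Usage sketch (model id taken from the pretrained map above):
#   tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#   tokenizer("a query", "a passage")  # pair encoding using the special-token logic defined above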
| 322 |
'''simple docstring'''
def find_min(arr):
    # Partition problem: split `arr` into two subsets whose sums differ as little as
    # possible. dp[i][j] is True iff some subset of the first i items sums exactly to j.
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, s + 1):
        dp[0][i] = False  # with no items, no positive sum is reachable
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip item i
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # or take item i
    diff = s  # fallback; always overwritten since dp[n][0] is True
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
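if __name__ == "__main__":
    # Demo (values illustrative): [1, 6, 11, 5] splits into {1, 5, 6} and {11},
    # so the minimum subset-sum difference is 1.
    print(find_min([1, 6, 11, 5]))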
| 322 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main():
    '''simple docstring'''
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
        cv2.imwrite(f'{file_root}.jpg', new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}')
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'{anno[0]} {x_center} {y_center} {width} {height}'
            annos_list.append(obj)
        with open(f'{file_root}.txt', 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    '''simple docstring'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'{label_name}.jpg')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    '''simple docstring'''
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char):
    '''simple docstring'''
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
| 3 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
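# Usage sketch (token and artifact names are illustrative):
#   reports = get_last_daily_ci_reports(
#       artifact_names=["run_models_gpu"], output_dir="ci_reports", token=os.environ.get("GITHUB_TOKEN")
#   )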
| 202 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    '''simple docstring'''
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
    '''simple docstring'''
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml")
    io_obj = io.BytesIO(r.content)
    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
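    # Example invocation (script name and paths are illustrative):
    #   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers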
| 371 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta'''] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta_fast'''] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm_roberta'''] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm_roberta'''] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xlm_roberta'''] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : List[str] = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class FunnelConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''funnel'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
    }
    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f'''Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported.'''
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f'''Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported.'''
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        """simple docstring"""
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        """simple docstring"""
        raise NotImplementedError(
            """This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""")

    @property
    def num_blocks(self):
        """simple docstring"""
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        """simple docstring"""
        raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""")
| 305 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """simple docstring"""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("""Matrices are not 2x2""")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """simple docstring"""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """simple docstring"""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """simple docstring"""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """simple docstring"""
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """simple docstring"""
    print("""\n""".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """simple docstring"""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
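# Strassen replaces the 8 recursive block products of the naive divide-and-conquer scheme
# with the 7 products t1..t7 above, so T(n) = 7*T(n/2) + O(n^2) = O(n^log2(7)) ~ O(n^2.81)
# instead of O(n^3).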
def strassen(matrix1: list, matrix2: list) -> list:
    """simple docstring"""
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            """Unable to multiply these matrices, please check the dimensions.\n"""
            f'''Matrix A: {matrix1}\n'''
            f'''Matrix B: {matrix2}'''
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    if dimension1[0] == dimension2[1] and dimension2[0] == dimension1[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 305 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name):
    '''simple docstring'''
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f'''.{module_name}''', 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    '''simple docstring'''
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.')
        return {}
    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            'AutoFeatureExtractor is designed to be instantiated '
            'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        '''simple docstring'''
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if 'AutoFeatureExtractor' in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and 'AutoFeatureExtractor' in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}''')

    @staticmethod
    def register(config_class, feature_extractor_class):
        '''simple docstring'''
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
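# Usage sketch (model id illustrative):
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")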
| 369 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token='<unk>', bos_token='<unk>', pad_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 2_000)
    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        # fmt: off
        self.assertListEqual(
            tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'], )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'])
        # fmt: on
    def test_fast_encode_decode(self):
        """simple docstring"""
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
    def test_tokenizer_integration(self):
        """simple docstring"""
        sequences = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
UpperCamelCase = {'input_ids': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase, model_name='AI-Sweden/gpt-sw3-126m', sequences=sequences, )
| 110 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["""torch""", """transformers""", """onnx"""])
| 323 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
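# Lazy import structure: maps each submodule to the public names it exposes; the
# heavy imports only run when an attribute is first accessed through _LazyModule.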
__A = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], __A, module_spec=__spec__)
| 10 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester ( unittest.TestCase ):
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , ) -> List[str]:
_A : int = parent
_A : Dict = batch_size
_A : Optional[int] = image_size
_A : Tuple = patch_size
_A : Optional[Any] = num_channels
_A : List[Any] = is_training
_A : Any = use_labels
_A : Optional[Any] = hidden_size
_A : Union[str, Any] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Dict = intermediate_size
_A : Optional[int] = hidden_act
_A : Optional[Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Optional[int] = type_sequence_label_size
_A : Optional[int] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : Union[str, Any] = (image_size // patch_size) ** 2
_A : Dict = num_patches + 1
def a__ ( self ) -> int:
_A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : Tuple = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , )
return config, pixel_values
def a__ ( self , _a , _a ) -> int:
_A : str = FlaxViTModel(config=_a )
_A : Tuple = model(_a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_A : Tuple = (self.image_size, self.image_size)
_A : Union[str, Any] = (self.patch_size, self.patch_size)
_A : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : List[Any] = self.type_sequence_label_size
_A : Optional[int] = FlaxViTForImageClassification(config=_a )
_A : str = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : List[Any] = 1
_A : Any = FlaxViTForImageClassification(_a )
_A : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : Any = model(_a )
def a__ ( self ) -> str:
_A : str = self.prepare_config_and_inputs()
_A , _A : List[str] = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class lowercase ( FlaxModelTesterMixin,unittest.TestCase ):
_a = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def a__ ( self ) -> None:
_A : List[Any] = FlaxViTModelTester(self )
_A : Tuple = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> str:
self.config_tester.run_common_tests()
def a__ ( self ) -> List[Any]:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Optional[Any]:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def a__ ( self ) -> List[str]:
_A , _A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : List[str] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Optional[Any]:
_A , _A : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A : Union[str, Any] = self._prepare_for_class(_a , _a )
_A : Tuple = model_class(_a )
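# compile the forward pass once with jax.jit and check that the jitted and
# eager (disable_jit) outputs agree in count and shape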
@jax.jit
def model_jitted(_a , **_a ):
return model(pixel_values=_a , **_a )
with self.subTest("""JIT Enabled""" ):
_A : List[str] = model_jitted(**_a ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_A : List[str] = model_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def a__ ( self ) -> Any:
for model_class_name in self.all_model_classes:
_A : Dict = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
_A : List[Any] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_a )
| 343 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ) -> Union[str, Any]:
_A : Optional[int] = parent
_A : Dict = batch_size
_A : Any = image_size
_A : Optional[int] = patch_size
_A : Optional[int] = num_channels
_A : List[Any] = is_training
_A : Optional[Any] = use_labels
_A : Any = hidden_size
_A : Any = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : int = intermediate_size
_A : Dict = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Any = type_sequence_label_size
_A : str = initializer_range
_A : Tuple = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : List[Any] = (image_size // patch_size) ** 2
_A : str = num_patches + 1
def a__ ( self ) -> Dict:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : List[str] = None
if self.use_labels:
_A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Union[str, Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Dict:
_A : List[str] = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a ) -> List[str]:
_A : Union[str, Any] = self.type_sequence_label_size
_A : Tuple = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , labels=_a )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : Dict = 1
_A : str = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : int = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> Any:
_A : Optional[int] = self.prepare_config_and_inputs()
_A , _A , _A : Dict = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin,PipelineTesterMixin,unittest.TestCase ):
_a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_a = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Tuple:
_A : Tuple = ViTMSNModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def a__ ( self ) -> int:
pass
def a__ ( self ) -> Any:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Tuple = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def a__ ( self ) -> str:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> List[Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Any:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> int:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : int = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img ( ):
_A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[int]:
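# facebook/vit-msn-small ships only the self-supervised backbone, so the
# classification head is newly initialized; seeding keeps the expected logits
# below deterministic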
torch.manual_seed(2 )
_A : Tuple = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a )
_A : Tuple = self.default_image_processor
_A : Dict = prepare_img()
_A : Optional[Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : int = model(**_a )
# verify the logits
_A : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Optional[int] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 343 | 1 |
from collections.abc import Sequence
def evaluate_poly ( poly : Sequence[float] , x : float ) -> float:
"""simple docstring"""
return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner ( poly : Sequence[float] , x : float ) -> float:
"""simple docstring"""
result = 0.0
for coeff in reversed(poly ):
result = result * x + coeff
return result
if __name__ == "__main__":
poly = (0.0, 0.0, 5.0, 9.3, 7.0)
x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 322 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask ( size : Any , overlap_pixels : Any , remove_borders : Optional[Any]=[] ) -> str:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = size[0] - overlap_pixels * 2
__lowerCAmelCase: str = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
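# blending mask: fully opaque (255) in the interior, with np.pad's linear_ramp
# fading to 0 across each overlap border that is kept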
__lowerCAmelCase: Any = np.ones((size_y, size_x) , dtype=np.uint8 ) * 2_55
__lowerCAmelCase: int = np.pad(SCREAMING_SNAKE_CASE , mode='linear_ramp' , pad_width=SCREAMING_SNAKE_CASE , end_values=0 )
if "l" in remove_borders:
__lowerCAmelCase: Dict = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__lowerCAmelCase: Tuple = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__lowerCAmelCase: List[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__lowerCAmelCase: List[str] = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def clamp ( value , smallest , largest ):
"""simple docstring"""
return max(smallest , min(value , largest ) )
def clamp_rect ( rect : [int] , min : [int] , max : [int] ) -> int:
"""simple docstring"""
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def add_overlap_rect ( rect : [int] , overlap : int , image_size : [int] ) -> Union[str, Any]:
"""simple docstring"""
rect = list(rect )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
rect = clamp_rect(rect , [0, 0] , [image_size[0], image_size[1]] )
return rect
def squeeze_tile ( tile , original_image , original_slice , slice_x ) -> Any:
"""simple docstring"""
result = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(tile , (original_slice, 0) )
return result
def unsqueeze_tile ( tile , original_image_slice ) -> str:
"""simple docstring"""
crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
tile = tile.crop(crop_rect )
return tile
def _a ( n : Optional[int] , d : List[Any] ) -> Optional[int]:
"""simple docstring"""
divisor = n % d
return n - divisor
class StableDiffusionTiledUpscalePipeline ( StableDiffusionUpscalePipeline ):
def __init__( self , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , low_res_scheduler : DDPMScheduler , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , max_noise_level : int = 3_5_0 , ) -> Optional[Any]:
super().__init__(
vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , max_noise_level=max_noise_level , )
def _process_tile ( self , original_image_slice , x , y , tile_size , tile_border , image , final_image , **kwargs ) -> Optional[int]:
torch.manual_seed(0 )
__lowerCAmelCase: Optional[int] = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__lowerCAmelCase: Optional[Any] = add_overlap_rect(UpperCAmelCase , UpperCAmelCase , image.size )
__lowerCAmelCase: Any = image.crop(UpperCAmelCase )
__lowerCAmelCase: Any = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__lowerCAmelCase: Tuple = translated_slice_x - (original_image_slice / 2)
__lowerCAmelCase: Union[str, Any] = max(0 , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = squeeze_tile(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Union[str, Any] = to_input.size
__lowerCAmelCase: List[Any] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__lowerCAmelCase: int = super(StableDiffusionTiledUpscalePipeline , self ).__call__(image=to_input , **kwargs ).images[0]
__lowerCAmelCase: Dict = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__lowerCAmelCase: Union[str, Any] = unsqueeze_tile(UpperCAmelCase , UpperCAmelCase )
__lowerCAmelCase: Optional[int] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__lowerCAmelCase: Optional[int] = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__lowerCAmelCase: int = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=UpperCAmelCase ) , mode='L' , )
final_image.paste(
UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , UpperCAmelCase )
@torch.no_grad()
def __call__( self , prompt : Union[str, List[str]] , image : Union[PIL.Image.Image, List[PIL.Image.Image]] , num_inference_steps : int = 7_5 , guidance_scale : float = 9.0 , noise_level : int = 5_0 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , tile_size : int = 1_2_8 , tile_border : int = 3_2 , original_image_slice : int = 3_2 , ) -> str:
__lowerCAmelCase: List[Any] = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__lowerCAmelCase: str = math.ceil(image.size[0] / tile_size )
__lowerCAmelCase: List[Any] = math.ceil(image.size[1] / tile_size )
__lowerCAmelCase: Optional[Any] = tcx * tcy
__lowerCAmelCase: Tuple = 0
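# walk the tile grid row by row; each tile is upscaled 4x and pasted into
# final_image with a feathered mask so the seams blend away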
for y in range(UpperCAmelCase ):
for x in range(UpperCAmelCase ):
self._process_tile(
original_image_slice , x , y , tile_size , tile_border , image , final_image , prompt=prompt , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , noise_level=noise_level , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def main ( ):
"""simple docstring"""
__lowerCAmelCase: Any = 'stabilityai/stable-diffusion-x4-upscaler'
__lowerCAmelCase: Dict = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE , revision='fp16' , torch_dtype=torch.float16 )
__lowerCAmelCase: Optional[Any] = pipe.to('cuda' )
__lowerCAmelCase: Tuple = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(obj ):
print(f'''progress: {obj['progress']:.4f}''' )
obj["image"].save('diffusers_library_progress.jpg' )
__lowerCAmelCase: str = pipe(image=SCREAMING_SNAKE_CASE , prompt='Black font, white background, vector' , noise_level=40 , callback=SCREAMING_SNAKE_CASE )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 322 | 1 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
lowerCAmelCase = logging.get_logger(__name__)
def shape_list ( tensor ):
"""simple docstring"""
if isinstance(tensor , np.ndarray ):
return list(tensor.shape )
dynamic = tf.shape(tensor )
if tensor.shape == tf.TensorShape(None ):
return dynamic
static = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def stable_softmax ( logits , axis = None , name = None ):
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 , axis=axis , name=name )
def functional_layernorm ( inputs , weight , bias , epsilon=1E-5 , axis=-1 ):
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
# Get mean and variance on the axis to be normalized
mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
shape = [1] * inputs.shape.rank
shape[axis] = shape_list(inputs )[axis]
weight = tf.reshape(weight , shape )
bias = tf.reshape(bias , shape )
# Compute layer normalization using the batch_normalization
# function.
outputs = tf.nn.batch_normalization(
inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
return outputs
def flatten ( input , start_dim=0 , end_dim=-1 ):
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
in_shape = tf.shape(input )
flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(input , out_shape )
def _a ( encoder_attention_mask ):
"""simple docstring"""
if not isinstance(encoder_attention_mask , tf.Tensor ):
encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = "input_ids" ):
"""simple docstring"""
tf.debugging.assert_less(
SCREAMING_SNAKE_CASE , tf.cast(SCREAMING_SNAKE_CASE , dtype=tensor.dtype ) , message=(
f'The maximum value of {tensor_name} ({tf.math.reduce_max(SCREAMING_SNAKE_CASE )}) must be smaller than the embedding '
f'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
) , )
def save_attributes_to_hdf5_group ( group , name , data ):
"""simple docstring"""
HDF5_OBJECT_HEADER_LIMIT = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'''The following attributes cannot be saved to HDF5 file because '''
f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
f'bytes: {bad_attributes}' )
data_npy = np.asarray(data )
num_chunks = 1
chunked_data = np.array_split(data_npy , num_chunks )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
chunked_data = np.array_split(data_npy , num_chunks )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(chunked_data ):
group.attrs['''%s%d''' % (name, chunk_id)] = chunk_data
else:
group.attrs[name] = data
def load_attributes_from_hdf5_group ( group , name ):
"""simple docstring"""
if name in group.attrs:
data = [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs[name]]
else:
data = []
chunk_id = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
chunk_id += 1
return data
def expand_1d ( data ):
"""simple docstring"""
def _expand_single_1d_tensor(t ):
if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(t , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_1d_tensor , data )
| 93 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
Pix2StructConfig,
Pix2StructForConditionalGeneration,
Pix2StructImageProcessor,
Pix2StructProcessor,
Pix2StructTextConfig,
Pix2StructVisionConfig,
)
def get_flax_param ( t5x_checkpoint_path ):
"""simple docstring"""
flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
flax_params = flatten_dict(flax_params )
return flax_params
def rename_and_convert_flax_params ( flax_dict ):
"""simple docstring"""
lowercase__ = {}
lowercase__ = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
lowercase__ = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowercase__ = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowercase__ = new_key.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowercase__ = new_key.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowercase__ = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , SCREAMING_SNAKE_CASE )
lowercase__ = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowercase__ = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , SCREAMING_SNAKE_CASE )
lowercase__ = flax_dict[key]
lowercase__ = {}
# convert converted_dict into torch format
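# T5X stores dense kernels as (in_features, out_features) while torch.nn.Linear
# expects (out_features, in_features), hence the transpose; embedding tables keep
# their layout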
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowercase__ = torch.from_numpy(converted_dict[key].T )
else:
lowercase__ = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf ( t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
"""simple docstring"""
flax_params = get_flax_param(t5x_checkpoint_path )
if not use_large:
encoder_config = Pix2StructVisionConfig()
decoder_config = Pix2StructTextConfig()
else:
encoder_config = Pix2StructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
decoder_config = Pix2StructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
config = Pix2StructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
model = Pix2StructForConditionalGeneration(config )
torch_params = rename_and_convert_flax_params(flax_params )
model.load_state_dict(torch_params )
tokenizer = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
image_processor = Pix2StructImageProcessor()
processor = Pix2StructProcessor(image_processor=image_processor , tokenizer=tokenizer )
if use_large:
lowercase__ = 40_96
lowercase__ = True
# mkdir if needed
os.makedirs(pytorch_dump_folder_path , exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
processor.save_pretrained(pytorch_dump_folder_path )
print('''Model saved in {}'''.format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
lowerCAmelCase = parser.parse_args()
convert_pix2struct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 93 | 1 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A : Optional[Any] = logging.get_logger(__name__)
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = original_name.split("." )[0]
__lowerCAmelCase = key.split("." )
__lowerCAmelCase = int(key_list[key_list.index(_UpperCamelCase ) - 2] )
__lowerCAmelCase = int(key_list[key_list.index(_UpperCamelCase ) - 1] )
__lowerCAmelCase = orig_block_num - offset
__lowerCAmelCase = key.replace(f"{orig_block_num}.{layer_num}.{original_name}" , f"block.{new_block_num}.{layer_num}.{new_name}" )
return key
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict()
__lowerCAmelCase , __lowerCAmelCase = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
__lowerCAmelCase = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
__lowerCAmelCase = key[: key.find("proj" )]
__lowerCAmelCase = key.replace(_UpperCamelCase , f"patch_embeddings.{total_embed_found}." )
__lowerCAmelCase = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
__lowerCAmelCase = "poolformer.encoder." + key
if "mlp.fc1" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "norm1" , "before_norm" )
if "norm2" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "norm2" , "after_norm" )
if "layer_scale_1" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
__lowerCAmelCase = key.replace("head" , "classifier" )
__lowerCAmelCase = value
return new_state_dict
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return image
@torch.no_grad()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = PoolFormerConfig()
# set attributes based on model_name
__lowerCAmelCase = "huggingface/label-files"
__lowerCAmelCase = model_name[-3:]
__lowerCAmelCase = 1000
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = (1, 1000)
# set config attributes
__lowerCAmelCase = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
__lowerCAmelCase = [2, 2, 6, 2]
__lowerCAmelCase = [64, 128, 320, 512]
__lowerCAmelCase = 4.0
__lowerCAmelCase = 0.9
elif size == "s24":
__lowerCAmelCase = [4, 4, 12, 4]
__lowerCAmelCase = [64, 128, 320, 512]
__lowerCAmelCase = 4.0
__lowerCAmelCase = 0.9
elif size == "s36":
__lowerCAmelCase = [6, 6, 18, 6]
__lowerCAmelCase = [64, 128, 320, 512]
__lowerCAmelCase = 4.0
__lowerCAmelCase = 1e-6
__lowerCAmelCase = 0.9
elif size == "m36":
__lowerCAmelCase = [6, 6, 18, 6]
__lowerCAmelCase = [96, 192, 384, 768]
__lowerCAmelCase = 4.0
__lowerCAmelCase = 1e-6
__lowerCAmelCase = 0.95
elif size == "m48":
__lowerCAmelCase = [8, 8, 24, 8]
__lowerCAmelCase = [96, 192, 384, 768]
__lowerCAmelCase = 4.0
__lowerCAmelCase = 1e-6
__lowerCAmelCase = 0.95
else:
raise ValueError(f"Size {size} not supported" )
# load image processor
__lowerCAmelCase = PoolFormerImageProcessor(crop_pct=_UpperCamelCase )
# Prepare image
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=_UpperCamelCase , return_tensors="pt" ).pixel_values
logger.info(f"Converting model {model_name}..." )
# load original state dict
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location=torch.device("cpu" ) )
# rename keys
__lowerCAmelCase = rename_keys(_UpperCamelCase )
# create HuggingFace model and load state dict
__lowerCAmelCase = PoolFormerForImageClassification(_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
model.eval()
# Define image processor
__lowerCAmelCase = PoolFormerImageProcessor(crop_pct=_UpperCamelCase )
__lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
__lowerCAmelCase = model(_UpperCamelCase )
__lowerCAmelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
__lowerCAmelCase = torch.tensor([-0.30_45, -0.67_58, -0.48_69] )
elif size == "s24":
__lowerCAmelCase = torch.tensor([0.44_02, -0.13_74, -0.80_45] )
elif size == "s36":
__lowerCAmelCase = torch.tensor([-0.60_80, -0.51_33, -0.58_98] )
elif size == "m36":
__lowerCAmelCase = torch.tensor([0.39_52, 0.22_63, -1.26_68] )
elif size == "m48":
__lowerCAmelCase = torch.tensor([0.11_67, -0.06_56, -0.34_23] )
else:
raise ValueError(f"Size {size} not supported" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _UpperCamelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
A : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A : int = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 57 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : int = 5_02_57 , lowerCamelCase_ : int = 10_24 , lowerCamelCase_ : int = 7_68 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "gelu_new" , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 1e-5 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = True , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = prefix_inner_dim
SCREAMING_SNAKE_CASE : List[str] = prefix_hidden_dim
SCREAMING_SNAKE_CASE : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE : str = (
nn.Linear(self.prefix_hidden_dim , lowerCamelCase_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE : Any = GPT2Config(
vocab_size=lowerCamelCase_ , n_positions=lowerCamelCase_ , n_embd=lowerCamelCase_ , n_layer=lowerCamelCase_ , n_head=lowerCamelCase_ , n_inner=lowerCamelCase_ , activation_function=lowerCamelCase_ , resid_pdrop=lowerCamelCase_ , embd_pdrop=lowerCamelCase_ , attn_pdrop=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , initializer_range=lowerCamelCase_ , scale_attn_weights=lowerCamelCase_ , use_cache=lowerCamelCase_ , scale_attn_by_inverse_layer_idx=lowerCamelCase_ , reorder_and_upcast_attn=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Union[str, Any] = GPT2LMHeadModel(lowerCamelCase_ )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : torch.Tensor , lowerCamelCase_ : Optional[torch.Tensor] = None , lowerCamelCase_ : Optional[torch.Tensor] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.transformer.transformer.wte(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = self.encode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.decode_prefix(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE : str = self.transformer(inputs_embeds=lowerCamelCase_ , labels=lowerCamelCase_ , attention_mask=lowerCamelCase_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int , lowerCamelCase_ : torch.device ):
'''simple docstring'''
return torch.zeros(lowerCamelCase_ , self.prefix_length , dtype=torch.int64 , device=lowerCamelCase_ )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
return self.encode_prefix(lowerCamelCase_ )
@torch.no_grad()
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = torch.split(lowerCamelCase_ , 1 , dim=0 )
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Tuple = []
for feature in features:
SCREAMING_SNAKE_CASE : Optional[int] = self.decode_prefix(feature.to(lowerCamelCase_ ) ) # back to the clip feature
# Only support beam search for now
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = self.generate_beam(
input_embeds=lowerCamelCase_ , device=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = torch.stack(lowerCamelCase_ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , lowerCamelCase_ : int = 5 , lowerCamelCase_ : int = 67 , lowerCamelCase_ : float = 1.0 , lowerCamelCase_ : Optional[int] = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = eos_token_id
SCREAMING_SNAKE_CASE : int = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.int )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(lowerCamelCase_ , device=lowerCamelCase_ , dtype=torch.bool )
if input_embeds is not None:
SCREAMING_SNAKE_CASE : Dict = input_embeds
else:
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Optional[int] = self.transformer(inputs_embeds=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = outputs.logits
SCREAMING_SNAKE_CASE : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
SCREAMING_SNAKE_CASE : Any = logits.softmax(-1 ).log()
if scores is None:
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = logits.topk(lowerCamelCase_ , -1 )
SCREAMING_SNAKE_CASE : Optional[Any] = generated.expand(lowerCamelCase_ , *generated.shape[1:] )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
SCREAMING_SNAKE_CASE : List[Any] = next_tokens
else:
SCREAMING_SNAKE_CASE : Dict = tokens.expand(lowerCamelCase_ , *tokens.shape[1:] )
SCREAMING_SNAKE_CASE : str = torch.cat((tokens, next_tokens) , dim=1 )
else:
SCREAMING_SNAKE_CASE : Tuple = -float(np.inf )
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
SCREAMING_SNAKE_CASE : List[str] = scores_sum / seq_lengths[:, None]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average.view(-1 ).topk(lowerCamelCase_ , -1 )
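# topk ran over the flattened (num_beams * vocab) scores: floor-division below
# recovers the source beam, the remainder recovers the token id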
SCREAMING_SNAKE_CASE : str = next_tokens // scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Tuple = seq_lengths[next_tokens_source]
SCREAMING_SNAKE_CASE : int = next_tokens % scores_sum.shape[1]
SCREAMING_SNAKE_CASE : Dict = next_tokens.unsqueeze(1 )
SCREAMING_SNAKE_CASE : Dict = tokens[next_tokens_source]
SCREAMING_SNAKE_CASE : Any = torch.cat((tokens, next_tokens) , dim=1 )
SCREAMING_SNAKE_CASE : List[str] = generated[next_tokens_source]
SCREAMING_SNAKE_CASE : Optional[Any] = scores_sum_average * seq_lengths
SCREAMING_SNAKE_CASE : Any = is_stopped[next_tokens_source]
SCREAMING_SNAKE_CASE : Dict = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
SCREAMING_SNAKE_CASE : str = torch.cat((generated, next_token_embed) , dim=1 )
SCREAMING_SNAKE_CASE : Dict = is_stopped + next_tokens.eq(lowerCamelCase_ ).squeeze()
if is_stopped.all():
break
SCREAMING_SNAKE_CASE : int = scores / seq_lengths
SCREAMING_SNAKE_CASE : Dict = scores.argsort(descending=lowerCamelCase_ )
# tokens tensors are already padded to max_seq_length
SCREAMING_SNAKE_CASE : Union[str, Any] = [tokens[i] for i in order]
SCREAMING_SNAKE_CASE : Dict = torch.stack(lowerCamelCase_ , dim=0 )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 323 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester :
def __init__( self , _a , _a=3 , _a=32 , _a=3 , _a=10 , _a=[8, 16, 32, 64] , _a=[1, 1, 2, 1] , _a=True , _a=True , _a="relu" , _a=3 , _a=None , _a=["stage2", "stage3", "stage4"] , _a=[2, 3, 4] , _a=1 , ) -> Tuple:
_A : Dict = parent
_A : int = batch_size
_A : str = image_size
_A : Optional[Any] = num_channels
_A : Optional[Any] = embeddings_size
_A : List[str] = hidden_sizes
_A : Any = depths
_A : Dict = is_training
_A : List[str] = use_labels
_A : Any = hidden_act
_A : Dict = num_labels
_A : Tuple = scope
_A : List[Any] = len(_a )
_A : List[str] = out_features
_A : Tuple = out_indices
_A : int = num_groups
def a__ ( self ) -> int:
_A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : Tuple = None
if self.use_labels:
_A : int = ids_tensor([self.batch_size] , self.num_labels )
_A : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> str:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def a__ ( self , _a , _a , _a ) -> Union[str, Any]:
_A : Tuple = BitModel(config=_a )
model.to(_a )
model.eval()
_A : str = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self , _a , _a , _a ) -> Dict:
_A : str = self.num_labels
_A : List[Any] = BitForImageClassification(_a )
model.to(_a )
model.eval()
_A : Union[str, Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self , _a , _a , _a ) -> List[Any]:
_A : Optional[int] = BitBackbone(config=_a )
model.to(_a )
model.eval()
_A : int = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_A : Optional[int] = None
_A : Tuple = BitBackbone(config=_a )
model.to(_a )
model.eval()
_A : Union[str, Any] = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin,PipelineTesterMixin,unittest.TestCase ):
_a = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
_a = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = BitModelTester(self )
_A : Optional[int] = ConfigTester(self , config_class=_a , has_text_modality=_a )
def a__ ( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> Tuple:
return
@unittest.skip(reason="""Bit does not output attentions""" )
def a__ ( self ) -> int:
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def a__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def a__ ( self ) -> Dict:
pass
def a__ ( self ) -> Tuple:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(_a )
_A : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Tuple:
_A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Optional[int]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Any:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(config=_a )
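# batch/group norms in a freshly initialized model should be identity-like:
# weight == 1 and bias == 0 everywhere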
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def a__ ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_A : Optional[Any] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : List[str] = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A : str = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_A : Optional[Any] = layer_type
_A : Dict = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Any = True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def a__ ( self ) -> str:
pass
def a__ ( self ) -> Optional[int]:
_A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Dict:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Tuple = BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img ( ):
_A : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> Optional[Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def a__ ( self ) -> str:
_A : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_A : Tuple = self.default_image_processor
_A : Optional[int] = prepare_img()
_A : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : int = model(**_a )
# verify the logits
_A : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : str = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( BackboneTesterMixin,unittest.TestCase ):
_a = (BitBackbone,) if is_torch_available() else ()
_a = BitConfig
_a = False
def a__ ( self ) -> int:
_A : Tuple = BitModelTester(self )
| 343 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
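# Fetch {job name: job URL} for every job of a GitHub Actions workflow run, paginated 100 jobs per page.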
_A : Any = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
_A : Union[str, Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : str = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_A : int = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[str] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
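# Fetch {artifact name: archive download URL} for every artifact of a workflow run, paginated 100 per page.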
_A : int = None
if token is not None:
_A : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : str = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
_A : Optional[Any] = requests.get(snake_case_,headers=snake_case_ ).json()
_A : Any = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_A : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(snake_case_ ):
_A : List[Any] = requests.get(url + f'''&page={i + 2}''',headers=snake_case_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ ):
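# Download a single artifact zip; GitHub first answers with a redirect to the real archive location.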
_A : Dict = None
if token is not None:
_A : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''}
_A : Tuple = requests.get(snake_case_,headers=snake_case_,allow_redirects=snake_case_ )
_A : Tuple = result.headers["""Location"""]
_A : Union[str, Any] = requests.get(snake_case_,allow_redirects=snake_case_ )
_A : Dict = os.path.join(snake_case_,f'''{artifact_name}.zip''' )
with open(snake_case_,"""wb""" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
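# Parse one CI report zip into [error line, error, failed test, job link] records.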
_A : List[str] = []
_A : int = []
_A : Tuple = None
with zipfile.ZipFile(snake_case_ ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(snake_case_ ) as f:
for line in f:
_A : Any = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_A : Dict = line[: line.index(""": """ )]
_A : Dict = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_A : List[str] = line[len("""FAILED """ ) :]
failed_tests.append(snake_case_ )
elif filename == "job_name.txt":
_A : Optional[int] = line
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(snake_case_ )} for `errors` '''
f'''and {len(snake_case_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
_A : Any = None
if job_name and job_links:
_A : Dict = job_links.get(snake_case_,snake_case_ )
# A list with elements of the form (line of error, error, failed test)
_A : Optional[int] = [x + [y] + [job_link] for x, y in zip(snake_case_,snake_case_ )]
return result
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
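# Collect error records from every report zip found in the given directory.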
_A : Dict = []
_A : Optional[int] = [os.path.join(snake_case_,snake_case_ ) for p in os.listdir(snake_case_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(snake_case_,job_links=snake_case_ ) )
return errors
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
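# Group errors by message, keeping a count and the (job link, error line) pairs where each occurred.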
_A : Dict = Counter()
counter.update([x[1] for x in logs] )
_A : Tuple = counter.most_common()
_A : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_A : str = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda item : item[1]["count"],reverse=True ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
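# Derive the model name from a test path such as tests/models/albert/..., or None for non-model tests.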
_A : Union[str, Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_A : Dict = test.split("""/""" )[2]
else:
_A : str = None
return test
def lowerCAmelCase_ ( snake_case_,snake_case_=None ):
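# Group errors per model: for each model, the total error count plus a per-error breakdown.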
_A : str = [(x[0], x[1], get_model(x[2] )) for x in logs]
_A : Union[str, Any] = [x for x in logs if x[2] is not None]
_A : Optional[Any] = {x[2] for x in logs}
_A : List[Any] = {}
for test in tests:
_A : Any = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_A : Union[str, Any] = counter.most_common()
_A : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_A : str = sum(error_counts.values() )
if n_errors > 0:
_A : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
_A : Union[str, Any] = dict(sorted(r.items(),key=lambda item : item[1]["count"],reverse=True ) )
return r
def lowerCAmelCase_ ( snake_case_ ):
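# Render the per-error counts as a GitHub-flavored Markdown table.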
_A : Optional[int] = """| no. | error | status |"""
_A : List[Any] = """|-:|:-|:-|"""
_A : List[Any] = [header, sep]
for error in reduced_by_error:
_A : List[str] = reduced_by_error[error]["""count"""]
_A : List[Any] = f'''| {count} | {error[:100]} | |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
def lowerCAmelCase_ ( snake_case_ ):
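# Render the per-model counts, with each model's most common error, as a Markdown table.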
_A : List[Any] = """| model | no. of errors | major error | count |"""
_A : Optional[Any] = """|-:|-:|-:|-:|"""
_A : Union[str, Any] = [header, sep]
for model in reduced_by_model:
_A : Dict = reduced_by_model[model]["""count"""]
_A , _A : str = list(reduced_by_model[model]["""errors"""].items() )[0]
_A : Union[str, Any] = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(snake_case_ )
return "\n".join(snake_case_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_snake_case = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_snake_case = get_job_links(args.workflow_run_id, token=args.token)
_snake_case = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_snake_case = k.find(" / ")
_snake_case = k[index + len(" / ") :]
_snake_case = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_snake_case = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_snake_case = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_snake_case = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_snake_case = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_snake_case = reduce_by_error(errors)
_snake_case = reduce_by_model(errors)
_snake_case = make_github_table(reduced_by_error)
_snake_case = make_github_table_per_model(reduced_by_model)
# write the per-error table and the per-model table to their respective files
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa_per_model)
| 343 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : str = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 38 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = RobertaPreLayerNormConfig.from_pretrained(
SCREAMING_SNAKE_CASE , architectures=['''RobertaPreLayerNormForMaskedLM'''] )
# convert state_dict
lowercase__ = torch.load(hf_hub_download(repo_id=SCREAMING_SNAKE_CASE , filename='''pytorch_model.bin''' ) )
lowercase__ = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('''roberta.''' ):
lowercase__ = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ):
continue
lowercase__ = tensor_value
lowercase__ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE , state_dict=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
# convert tokenizer
lowercase__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 110 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) ->str:
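# Build a SwinConfig for 192x192 SimMIM pre-training; only the base and large variants are supported.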
A__ : List[str] = SwinConfig(image_size=1_9_2 )
if "base" in model_name:
A__ : Optional[Any] = 6
A__ : Union[str, Any] = 1_2_8
A__ : Dict = (2, 2, 1_8, 2)
A__ : Optional[int] = (4, 8, 1_6, 3_2)
elif "large" in model_name:
A__ : Any = 1_2
A__ : str = 1_9_2
A__ : List[str] = (2, 2, 1_8, 2)
A__ : Any = (6, 1_2, 2_4, 4_8)
else:
raise ValueError("""Model not supported, only supports base and large variants""" )
A__ : Optional[int] = window_size
A__ : Optional[int] = embed_dim
A__ : Dict = depths
A__ : List[str] = num_heads
return config
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any] ) ->Optional[int]:
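# Map an original SimMIM checkpoint key to the Transformers Swin naming scheme.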
if "encoder.mask_token" in name:
A__ : Dict = name.replace("""encoder.mask_token""", """embeddings.mask_token""" )
if "encoder.patch_embed.proj" in name:
A__ : List[str] = name.replace("""encoder.patch_embed.proj""", """embeddings.patch_embeddings.projection""" )
if "encoder.patch_embed.norm" in name:
A__ : Union[str, Any] = name.replace("""encoder.patch_embed.norm""", """embeddings.norm""" )
if "attn.proj" in name:
A__ : List[Any] = name.replace("""attn.proj""", """attention.output.dense""" )
if "attn" in name:
A__ : List[str] = name.replace("""attn""", """attention.self""" )
if "norm1" in name:
A__ : Tuple = name.replace("""norm1""", """layernorm_before""" )
if "norm2" in name:
A__ : Optional[Any] = name.replace("""norm2""", """layernorm_after""" )
if "mlp.fc1" in name:
A__ : str = name.replace("""mlp.fc1""", """intermediate.dense""" )
if "mlp.fc2" in name:
A__ : str = name.replace("""mlp.fc2""", """output.dense""" )
if name == "encoder.norm.weight":
A__ : List[str] = """layernorm.weight"""
if name == "encoder.norm.bias":
A__ : Dict = """layernorm.bias"""
if "decoder" in name:
pass
else:
A__ : Tuple = """swin.""" + name
return name
def _lowerCAmelCase ( UpperCAmelCase__ : str, UpperCAmelCase__ : List[Any] ) ->Tuple:
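# Rename the original keys and split each fused qkv projection into separate query/key/value tensors.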
for key in orig_state_dict.copy().keys():
A__ : str = orig_state_dict.pop(UpperCAmelCase__ )
if "attn_mask" in key:
pass
elif "qkv" in key:
A__ : Optional[Any] = key.split(""".""" )
A__ : Union[str, Any] = int(key_split[2] )
A__ : Tuple = int(key_split[4] )
A__ : int = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A__ : Optional[Any] = val[:dim, :]
A__ : int = val[
dim : dim * 2, :
]
A__ : Union[str, Any] = val[-dim:, :]
else:
A__ : List[Any] = val[
:dim
]
A__ : List[str] = val[
dim : dim * 2
]
A__ : int = val[
-dim:
]
else:
A__ : Optional[Any] = val
return orig_state_dict
def _lowerCAmelCase ( UpperCAmelCase__ : List[str], UpperCAmelCase__ : Any, UpperCAmelCase__ : int, UpperCAmelCase__ : Tuple ) ->List[Any]:
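# Load the original checkpoint, convert it, run a forward pass as a sanity check, and optionally save or push to the hub.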
A__ : Tuple = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""model"""]
A__ : List[Any] = get_swin_config(UpperCAmelCase__ )
A__ : Tuple = SwinForMaskedImageModeling(UpperCAmelCase__ )
model.eval()
A__ : Dict = convert_state_dict(UpperCAmelCase__, UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
A__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : Tuple = ViTImageProcessor(size={"""height""": 1_9_2, """width""": 1_9_2} )
A__ : str = Image.open(requests.get(UpperCAmelCase__, stream=UpperCAmelCase__ ).raw )
A__ : List[Any] = image_processor(images=UpperCAmelCase__, return_tensors="""pt""" )
with torch.no_grad():
A__ : Optional[Any] = model(**UpperCAmelCase__ ).logits
print(outputs.keys() )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase__ )
if push_to_hub:
print(f'Pushing model and image processor for {model_name} to hub' )
model.push_to_hub(f'microsoft/{model_name}' )
image_processor.push_to_hub(f'microsoft/{model_name}' )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A_ = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 360 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''spiece.model'''}
A_ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
A_ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
A_ = 0
A_ = 1
A_ = 2
A_ = 3
A_ = 4
class __SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = 'left'
def __init__( self : Dict , snake_case : int , snake_case : List[Any]=False , snake_case : List[str]=True , snake_case : Dict=False , snake_case : Optional[Any]="<s>" , snake_case : List[str]="</s>" , snake_case : Tuple="<unk>" , snake_case : Tuple="<sep>" , snake_case : Union[str, Any]="<pad>" , snake_case : Dict="<cls>" , snake_case : Optional[Any]="<mask>" , snake_case : Optional[int]=["<eop>", "<eod>"] , snake_case : Optional[Dict[str, Any]] = None , **snake_case : Dict , ):
'''simple docstring'''
A__ : Optional[int] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
A__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
A__ : str = 3
A__ : str = do_lower_case
A__ : Optional[Any] = remove_space
A__ : List[Any] = keep_accents
A__ : Union[str, Any] = vocab_file
A__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@property
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : int = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
'''simple docstring'''
A__ : int = self.__dict__.copy()
A__ : int = None
return state
def __setstate__( self : Tuple , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A__ : Optional[int] = {}
A__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] ):
'''simple docstring'''
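# Normalize raw text: collapse whitespace, unify quote styles, optionally strip accents and lowercase.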
if self.remove_space:
A__ : Optional[Any] = """ """.join(inputs.strip().split() )
else:
A__ : Dict = inputs
A__ : str = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
A__ : Any = unicodedata.normalize("""NFKD""" , snake_case )
A__ : Optional[int] = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] )
if self.do_lower_case:
A__ : Any = outputs.lower()
return outputs
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str ):
'''simple docstring'''
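# SentencePiece tokenization; pieces ending in a digit followed by "," are re-split so the comma becomes its own token.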
A__ : Dict = self.preprocess_text(snake_case )
A__ : Dict = self.sp_model.encode(snake_case , out_type=snake_case )
A__ : Optional[int] = []
for piece in pieces:
if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ : int = cur_pieces[1:]
else:
A__ : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case )
else:
new_pieces.append(snake_case )
return new_pieces
def _UpperCamelCase ( self : List[str] , snake_case : Tuple ):
'''simple docstring'''
return self.sp_model.PieceToId(snake_case )
def _UpperCamelCase ( self : List[str] , snake_case : Any ):
'''simple docstring'''
return self.sp_model.IdToPiece(snake_case )
def _UpperCamelCase ( self : Optional[int] , snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = """""".join(snake_case ).replace(snake_case , """ """ ).strip()
return out_string
def _UpperCamelCase ( self : int , snake_case : List[int] , snake_case : bool = False , snake_case : bool = None , snake_case : bool = True , **snake_case : Union[str, Any] , ):
'''simple docstring'''
A__ : List[str] = kwargs.pop("""use_source_tokenizer""" , snake_case )
A__ : Any = self.convert_ids_to_tokens(snake_case , skip_special_tokens=snake_case )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A__ : Any = []
A__ : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
A__ : str = []
sub_texts.append(snake_case )
else:
current_sub_text.append(snake_case )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A__ : Dict = """""".join(snake_case )
A__ : int = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A__ : Tuple = self.clean_up_tokenization(snake_case )
return clean_text
else:
return text
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Tuple = [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCamelCase ( self : Dict , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is not None:
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1, 1]
return ([0] * len(snake_case )) + [1, 1]
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Any = [self.sep_token_id]
A__ : int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCamelCase ( self : Optional[Any] , snake_case : str , snake_case : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ : List[Any] = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , """wb""" ) as fi:
A__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
| 296 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def __init__( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Union[str, Any]=30 , lowerCamelCase_ : str=2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Any=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Union[str, Any]=10 , lowerCamelCase_ : Optional[Any]=0.0_2 , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = num_patches + 1
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
return config, pixel_values
def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ):
"""simple docstring"""
UpperCamelCase = FlaxViTModel(config=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase = (self.image_size, self.image_size)
UpperCamelCase = (self.patch_size, self.patch_size)
UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = FlaxViTForImageClassification(config=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = FlaxViTForImageClassification(lowerCamelCase_ )
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE_ ( FlaxModelTesterMixin , unittest.TestCase ):
__lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = FlaxViTModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = model_class(lowerCamelCase_ )
@jax.jit
def model_jitted(lowerCamelCase_ : Any , **lowerCamelCase_ : Any ):
return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ )
with self.subTest("""JIT Enabled""" ):
UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
UpperCamelCase = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCamelCase_ )
| 343 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict=13 , lowerCamelCase_ : str=30 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : Any=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : int=4 , lowerCamelCase_ : str=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[Any]=10 , lowerCamelCase_ : List[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[Any]=0.6 , lowerCamelCase_ : Optional[Any]=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = mask_ratio
UpperCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCamelCase = (image_size // patch_size) ** 2
UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = TFViTMAEModel(config=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ):
"""simple docstring"""
UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ )
# expected sequence length = num_patches
UpperCamelCase = (self.image_size // self.patch_size) ** 2
UpperCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCamelCase = 1
UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ )
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ )
UpperCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) = config_and_inputs
UpperCamelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__lowerCAmelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = TFViTMAEModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ )
UpperCamelCase = outputs_dict[0].numpy()
UpperCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase_ : List[Any] ):
UpperCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase_ ):
UpperCamelCase = v.numpy()
else:
UpperCamelCase = np.array(lowerCamelCase_ )
return inputs_np_dict
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = prepare_numpy_arrays(lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ )
self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase = tf.constant(lowerCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase_ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(lowerCamelCase_ , lowerCamelCase_ ),)
if isinstance(lowerCamelCase_ , lowerCamelCase_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase_ , """_keras_serializable""" , lowerCamelCase_ )
}
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCamelCase = tf.convert_to_tensor(lowerCamelCase_ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
UpperCamelCase = main_layer_class(lowerCamelCase_ )
UpperCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
UpperCamelCase = tf.keras.Model(lowerCamelCase_ , outputs=main_layer(lowerCamelCase_ ) )
UpperCamelCase = model(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase = os.path.join(lowerCamelCase_ , """keras_model.h5""" )
model.save(lowerCamelCase_ )
UpperCamelCase = tf.keras.models.load_model(
lowerCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase_ , tf.keras.Model )
UpperCamelCase = model(lowerCamelCase_ )
self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase = outputs.last_hidden_state.numpy()
UpperCamelCase = 0
else:
UpperCamelCase = outputs.logits.numpy()
UpperCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ )
UpperCamelCase = model_class.from_pretrained(lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
if model_class.__name__ == "TFViTMAEModel":
UpperCamelCase = after_outputs["""last_hidden_state"""].numpy()
UpperCamelCase = 0
else:
UpperCamelCase = after_outputs["""logits"""].numpy()
UpperCamelCase = 0
UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase_ , 1E-5 )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = int((config.image_size // config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCamelCase_ )
UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ )
UpperCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase_ )
UpperCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
UpperCamelCase = model_class.from_config(model.config )
UpperCamelCase = new_model(lowerCamelCase_ ) # Build model
new_model.set_weights(model.get_weights() )
UpperCamelCase = new_model(lowerCamelCase_ , noise=lowerCamelCase_ )
self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
pass
@slow
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(lowerCamelCase_ )
def lowercase( ) -> int:
'''simple docstring'''
UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
np.random.seed(2 )
UpperCamelCase = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCamelCase = ViTMAEConfig()
UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ )
# verify the logits
UpperCamelCase = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
UpperCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
| 343 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class a_ ( PretrainedConfig ):
lowercase = """informer"""
lowercase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "student_t" , _SCREAMING_SNAKE_CASE = "nll" , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "mean" , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 64 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 32 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "gelu" , _SCREAMING_SNAKE_CASE = 0.0_5 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = 0.0_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE = "prob" , _SCREAMING_SNAKE_CASE = 5 , _SCREAMING_SNAKE_CASE = True , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = prediction_length
UpperCamelCase = context_length or prediction_length
UpperCamelCase = distribution_output
UpperCamelCase = loss
UpperCamelCase = input_size
UpperCamelCase = num_time_features
UpperCamelCase = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
UpperCamelCase = scaling
UpperCamelCase = num_dynamic_real_features
UpperCamelCase = num_static_real_features
UpperCamelCase = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
UpperCamelCase = cardinality
else:
UpperCamelCase = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
UpperCamelCase = embedding_dimension
else:
UpperCamelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCamelCase = num_parallel_samples
# Transformer architecture configuration
UpperCamelCase = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCamelCase = d_model
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_attention_heads
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = decoder_layers
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = encoder_layerdrop
UpperCamelCase = decoder_layerdrop
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = use_cache
# Informer
UpperCamelCase = attention_type
UpperCamelCase = sampling_factor
UpperCamelCase = distil
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 358 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger('transformers.models.speecht5')
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
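# Copy the generator weights into the HF model; weight norm is applied first so weight_g/weight_v exist, then removed.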
hf_model.apply_weight_norm()
UpperCamelCase = checkpoint["""input_conv.weight_g"""]
UpperCamelCase = checkpoint["""input_conv.weight_v"""]
UpperCamelCase = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase = checkpoint[F"upsamples.{i}.1.weight_g"]
UpperCamelCase = checkpoint[F"upsamples.{i}.1.weight_v"]
UpperCamelCase = checkpoint[F"upsamples.{i}.1.bias"]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
UpperCamelCase = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
UpperCamelCase = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , )-> List[Any]:
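# Convert an original HiFi-GAN checkpoint plus mean/scale stats into a SpeechT5HifiGan model.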
if config_path is not None:
UpperCamelCase = SpeechTaHifiGanConfig.from_pretrained(__UpperCamelCase )
else:
UpperCamelCase = SpeechTaHifiGanConfig()
UpperCamelCase = SpeechTaHifiGan(__UpperCamelCase )
UpperCamelCase = torch.load(__UpperCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , __UpperCamelCase , __UpperCamelCase )
UpperCamelCase = np.load(__UpperCamelCase )
UpperCamelCase = stats[0].reshape(-1 )
UpperCamelCase = stats[1].reshape(-1 )
UpperCamelCase = torch.from_numpy(__UpperCamelCase ).float()
UpperCamelCase = torch.from_numpy(__UpperCamelCase ).float()
model.save_pretrained(__UpperCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 183 | 0 |