| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
---|---|---|---|---|
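Each row below pairs a `code` sample with a `style_context` sample and a binary `label`; the two integer columns appear to index the code-style class of each sample. A minimal sketch for loading a dataset with this schema from the Hugging Face Hub; the repository id `org/code-style-pairs` is a hypothetical placeholder, since the actual dataset name is not given here:

from datasets import load_dataset

# "org/code-style-pairs" is a placeholder id, not the real dataset name.
ds = load_dataset("org/code-style-pairs", split="train")
print(ds.features)  # code, code_codestyle, style_context, style_context_codestyle, label
print(ds[0]["label"])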
"""Raise a base to an exponent using recursion."""


def power(base: int, exponent: int) -> float:
    """Recursively raise base to a non-negative exponent."""
    return base * power(base, exponent - 1) if exponent else 1
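
# Added sanity checks (not in the original dataset row); they exercise the
# recursive definition above.
assert power(2, 10) == 1024
assert power(5, 0) == 1
assert power(7, 1) == 7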
if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal with negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
| 93 |
"""Tabu search for the Travelling Salesman Problem on a weighted edge list."""
import argparse
import copy


def generate_neighbours(path):
    """Parse a whitespace-separated edge list into {node: [[neighbour, distance], ...]}."""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
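
# Added demo (not part of the original script): builds a tiny edge-list file
# and checks generate_neighbours() on it. The "node node distance" line format
# is inferred from the parsing code above. Call manually to run.
def _demo_generate_neighbours():
    import os
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("a b 20\na c 18\nb c 10\n")
        path = tmp.name
    try:
        neighbours = generate_neighbours(path)
        assert neighbours["a"] == [["b", "20"], ["c", "18"]]
    finally:
        os.remove(path)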
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Return every 2-swap neighbour of the solution, each with its total distance appended, sorted by distance."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Improve the tour for `iters` iterations, forbidding recently used swaps via a bounded tabu list."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)

    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours,
        args.Iterations, args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
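
# Example invocation (added note; the file name is illustrative):
#   python tabu_search.py -f tsp_edges.txt -i 100 -s 5
# where each line of tsp_edges.txt is "<node> <node> <distance>".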
| 93 | 1 |
import os
import time

import numpy as np
import onnxruntime as ort

# NOTE: the dataset obfuscation stripped the targets of the next three
# assignments. Judging by the TensorRT provider below, they most likely set
# onnxruntime environment flags via os.environ; the exact variable names
# cannot be recovered from this row.
# os.environ[...] = "1"
# os.environ[...] = "0"
# os.environ[...] = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
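
# Added note (not in the original row): onnxruntime silently falls back to the
# next provider in the list when one is unavailable; this shows what the
# installed build actually supports.
print("Available providers:", ort.get_available_providers())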
| 141 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


logger = logging.get_logger(__name__)

# map ONNX tensor type strings to numpy dtypes
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False,
                         cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token,
                revision=revision, cache_dir=cache_dir, force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir,
            force_download=force_download, use_auth_token=use_auth_token, **model_kwargs,
        )
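
# Added usage sketch (not part of diffusers itself); the directory path is
# illustrative and must contain a "model.onnx" file:
#
#     model = OnnxRuntimeModel.from_pretrained("./onnx_model_dir")
#     outputs = model(input_ids=np.ones((1, 128), dtype=np.int64))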
| 141 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
        hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4], num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act,
            num_labels=self.num_labels, out_features=self.out_features,
            out_indices=self.out_indices, num_groups=self.num_groups,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.6526, -0.5263, -1.4398]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 343 |
from __future__ import annotations
from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if iterating the list revisits a node, i.e. the list contains a cycle."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
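
# Added alternative (not in the original snippet): Floyd's tortoise-and-hare
# cycle detection answers the same question in O(1) extra memory instead of
# keeping a visited list.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False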
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 343 | 1 |
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ..."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2
def count_divisors(n):
    """Count the divisors of n from its prime factorization."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
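
# Added sanity checks (not in the original row): 28 = 2^2 * 7 has
# (2+1) * (1+1) = 6 divisors; 12 = 2^2 * 3 likewise has 6.
assert count_divisors(28) == 6
assert count_divisors(12) == 6
assert count_divisors(1) == 1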
def solution():
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
| 351 |
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 138 | 0 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('''Input must be an integer''' )
if input_num <= 0:
raise ValueError('''Input must be positive''' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
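
# Added sanity checks (not in the original row): 6 and 28 are perfect numbers,
# so the sum of their proper divisors equals the number itself.
assert sum_of_divisors(6) == 6
assert sum_of_divisors(28) == 28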
if __name__ == "__main__":
import doctest
doctest.testmod()
| 260 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        # NOTE: the assignment target here was stripped by the dataset
        # obfuscation; in the upstream transformers test this line disables
        # the trust-remote-code prompt timeout.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)
    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)
    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")
    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")
    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 296 | 0 |
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into the requested number of contiguous byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
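
# Added sanity check (not in the original row): 100 bytes across 3 partitions.
assert allocation_num(100, 3) == ["1-33", "34-66", "67-100"]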
if __name__ == "__main__":
import doctest
doctest.testmod()
| 140 |
from typing import Callable, Dict, Optional, Tuple

import torch
from torch import nn
from torch.distributions import (
    AffineTransform,
    Distribution,
    Independent,
    NegativeBinomial,
    Normal,
    StudentT,
    TransformedDistribution,
)


class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance scales with the square of the affine scale."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # one linear head per distribution parameter
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x):
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None):
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # smooth algebraic map onto the positive half-line, similar to softplus
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
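
# Added usage sketch (not part of the original module): project 32-dim features
# to Student-T parameters and build the distribution; the shapes are illustrative.
if __name__ == "__main__":
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features=32)
    df, loc, scale = projection(torch.randn(8, 32))
    distr = output.distribution((df, loc, scale))
    print(distr.sample().shape)  # torch.Size([8])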
| 140 | 1 |
"""Selection sort implementation."""


def selection_sort(collection):
    """Sort in place by repeatedly selecting the minimum of the unsorted suffix."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
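
# Added sanity checks (not in the original row):
assert selection_sort([3, 1, 2]) == [1, 2, 3]
assert selection_sort([]) == []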
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
| 70 |
"""simple docstring"""
from math import pow, sqrt
def lowerCamelCase__ ( *_lowerCamelCase : float ) -> bool:
lowerCamelCase_ = len(_lowerCamelCase ) > 0 and all(value > 0.0 for value in values )
return result
def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float ) -> float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowerCamelCase , _lowerCamelCase )
else ValueError('Input Error: Molar mass values must greater than 0.' )
)
def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def lowerCamelCase__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
| 183 | 0 |
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict (the target key was stripped by the
        # dataset obfuscation; the renamed qkv bias key is the natural target)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights to the Transformers design."""
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>")
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device)
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = InstructBlipProcessor(
        image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
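
# Example invocation (added note; the output path is illustrative):
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl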
| 365 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False,
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
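# A minimal sketch (added; not part of the original test file) of what the mask helper
# above computes: positions equal to pad_token_id become 0, everything else becomes 1.
# The toy tensor and pad id below are illustrative assumptions.
#
#   import tensorflow as tf
#   pad_token_id = 1
#   toy_ids = tf.constant([[5, 7, 1, 1]])
#   mask = tf.cast(tf.math.not_equal(toy_ids, pad_token_id), tf.int8)
#   # mask -> [[1, 1, 0, 0]]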
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 139 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 141 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that can map one placeholder token to several learned sub-tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
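# A brief usage sketch (added; not from the original file). The checkpoint and the
# "<cat-toy>" placeholder string are illustrative assumptions; the embedding-training
# loop that normally accompanies this tokenizer is omitted.
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>", vector_shuffle=True)
#   # "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before encoding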
| 141 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
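# A small illustrative run (added; not in the original script). The sentences and the
# 10-token budget are made up, and any pretrained tokenizer stands in for a real one.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   src = ["short line", "another short line", "a much longer line that will not fit"]
#   tgt = ["s1", "s2", "s3"]
#   packed_src, packed_tgt = pack_examples(tok, src, tgt, max_tokens=10)
#   # neighbouring examples are merged greedily until the token budget is hit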
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
| 143 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Given any two of the three concentrations (pass the unknown one as 0),
    solve the mass-action law n_i^2 = n * p for the missing value.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)
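# Worked examples (added for illustration; the numbers are arbitrary but the
# arithmetic follows the mass-action law implemented above):
#
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   # -> ('intrinsic_conc', 50.0)   since sqrt(25 * 100) = 50
#   carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
#   # -> ('electron_conc', 25.0)    since 200**2 / 1600 = 25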
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143 | 1 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads, depth=1,
                use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels,
                dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads, depth=1,
                use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels,
                dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
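# A minimal usage sketch (added; not part of the diffusers source). The shapes and the
# time-embedding width are illustrative assumptions; flax UNet blocks use NHWC layout
# and FlaxResnetBlock2D projects the time embedding to the block width internally.
#
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=32, num_layers=1)
#   sample = jnp.zeros((1, 64, 64, 32))
#   temb = jnp.zeros((1, 128))
#   params = block.init(jax.random.PRNGKey(0), sample, temb)
#   hidden, skips = block.apply(params, sample, temb)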
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels,
                dropout_prob=self.dropout, dtype=self.dtype,
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads, depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype,
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels,
                dropout_prob=self.dropout, dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
| 86 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
__A : Tuple = download_images_from_google_query(sys.argv[1])
print(F'{image_count} images were downloaded to disk.')
except IndexError:
print('''Please provide a search term.''')
raise
| 138 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12,
        num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12,
        num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32",
        router_ignore_padding_tokens=False, relative_attention_num_buckets=32,
        relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0,
        feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True,
        pad_token_id=0, eos_token_id=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 358 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4,
        hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True,
        intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 343 | 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
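# A short usage sketch (added; not in the original file). The checkpoint name is an
# assumption; any CLIP-style zero-shot image-classification model works here.
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   preds = classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#   # -> [{"score": ..., "label": "cat"}, ...] sorted by descending score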
| 140 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
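# Worked example (added for illustration): with the default scale factor of 8, a
# 512x512 request maps to a 64x64 latent grid, and non-multiples of 64 round up:
#
#   downscale_height_and_width(512, 512)  # -> (64, 64)
#   downscale_height_and_width(520, 512)  # -> (72, 64)   since 520 // 64 = 8 remainder 8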
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self, image_embeds, negative_image_embeds, hint, height: int = 512, width: int = 512,
        num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1,
        generator=None, latents=None, output_type: Optional[str] = "pil", return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 140 | 1 |
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned"
                " instead. Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 369 |
'''simple docstring'''
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    # for this classic input the minimum cost is 15125 scalar multiplications
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 46 | 0 |
'''simple docstring'''
import copy
import os
from typing import List, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1, depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560,
        pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
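# A quick composition sketch (added for illustration; it mirrors the classmethod above):
#
#   text_config = AlignTextConfig()
#   vision_config = AlignVisionConfig()
#   config = AlignConfig.from_text_vision_configs(text_config, vision_config)
#   config.projection_dim  # -> 640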
| 34 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
A_ = logging.get_logger(__name__)
class DonutFeatureExtractor( DonutImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
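# A minimal sketch of the deprecation behaviour above (hedged: this relies only
# on standard `warnings` semantics and is meant for a throwaway script, not import time):
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         DonutFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)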
| 139 | 0 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
    var_map = (
        ('layer.', 'layer_'),
        ('word_embeddings.weight', 'word_embeddings'),
        ('position_embeddings.weight', 'position_embeddings'),
        ('token_type_embeddings.weight', 'token_type_embeddings'),
        ('.', '/'),
        ('LayerNorm/weight', 'LayerNorm/gamma'),
        ('LayerNorm/bias', 'LayerNorm/beta'),
        ('weight', 'kernel'),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()
    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return F"""bert/{name}"""
    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(F"""Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}""")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('-', '_') + '.ckpt'))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, required=True, help='model name e.g. bert-base-uncased')
    parser.add_argument(
        '--cache_dir', type=str, default=None, required=False, help='Directory containing pytorch model')
    parser.add_argument('--pytorch_model_path', type=str, required=True, help='/path/to/<pytorch-model-name>.bin')
    parser.add_argument('--tf_cache_dir', type=str, required=True, help='Directory in which to save tensorflow model')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
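# A hedged usage sketch: invoking the converter programmatically instead of via
# the shell; both paths below are hypothetical placeholders, not real files.
#
#     main([
#         "--model_name", "bert-base-uncased",
#         "--pytorch_model_path", "/tmp/pytorch_model.bin",
#         "--tf_cache_dir", "/tmp/tf_ckpt",
#     ])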
| 249 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test( self , generator , _ ):
        outputs = generator('Something there' )
        self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
        outputs = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
                [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
            ] , )
        outputs = generator(
            ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
                [{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
            ] , )
        with self.assertRaises(TypeError ):
            generator(4 )
    @require_torch
    def test_small_model_pt( self ):
        generator = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
        # do_sample=False necessary for reproducibility
        outputs = generator('Something there' , do_sample=False )
        self.assertEqual(outputs , [{'generated_text': ''}] )
        num_return_sequences = 3
        outputs = generator(
            'Something there' , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
        target_outputs = [
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': ''},
        ]
        self.assertEqual(outputs , target_outputs )
        outputs = generator('This is a test' , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {'generated_token_ids': ANY(torch.Tensor )},
                {'generated_token_ids': ANY(torch.Tensor )},
            ] , )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = '<pad>'
        outputs = generator(
            ['This is a test', 'This is a second test'] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {'generated_token_ids': ANY(torch.Tensor )},
                    {'generated_token_ids': ANY(torch.Tensor )},
                ],
                [
                    {'generated_token_ids': ANY(torch.Tensor )},
                    {'generated_token_ids': ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def test_small_model_tf( self ):
        generator = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
        # do_sample=False necessary for reproducibility
        outputs = generator('Something there' , do_sample=False )
        self.assertEqual(outputs , [{'generated_text': ''}] )
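# A hedged, standalone sketch of the pipeline exercised by these tests; the
# tiny checkpoint has random weights, so the generated text itself is meaningless:
#
#     from transformers import pipeline
#     generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#     print(generator("Something there", do_sample=False))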
| 249 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node( self , node ):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
    def construct_mapping( self , node , deep=False ):
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme( readme_content: str ) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---' ) + 1
        yamlblock = '\n'.join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata( dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {"""train_eval_index"""} # train-eval-index in the YAML metadata
    @classmethod
    def from_readme( cls , path ) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)."""
        with open(path , encoding='utf-8' ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self , path ):
        if path.exists():
            with open(path , encoding='utf-8' ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content )
        with open(path , 'w' , encoding='utf-8' ) as readme_file:
            readme_file.write(updated_readme_content )
    def _to_readme( self , readme_content = None ) -> str:
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content
    @classmethod
    def from_yaml_string( cls , string ) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self ) -> str:
        return yaml.safe_dump(
            {
                (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding='utf-8' , ).decode('utf-8' )
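# A minimal round-trip sketch, assuming a README.md-style YAML front-matter block:
#
#     demo = "---\nlicense: mit\n---\n# My dataset\n"
#     yaml_block, body = _split_yaml_from_readme(demo)
#     metadata = DatasetMetadata.from_yaml_string(yaml_block)
#     assert metadata["license"] == "mit" and body.startswith("# My dataset")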
lowerCAmelCase__ : Dict = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 143 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : int = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    # We will verify our results on an image of cute cats
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name , pytorch_dump_folder_path ):
    """
    Copy/paste/tweak the timm model's weights to our DeiT structure.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith('tiny' ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith('small' ):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith('base' ):
        pass
    elif deit_name[4:].startswith('large' ):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
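# A hedged CLI sketch for the converter above; the dump folder is a
# hypothetical placeholder:
#
#     python convert_deit_timm_to_pytorch.py \
#         --deit_name vit_deit_base_distilled_patch16_224 \
#         --pytorch_dump_folder_path /tmp/deit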
| 143 | 1 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
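# A hedged CLI sketch via python-fire; the directory names are hypothetical:
#
#     python minify.py /data/full_dataset /data/tiny_dataset 100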
| 224 |
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f'''The following runners are offline:\n{failed}''')
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
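# A minimal offline sketch of the filtering logic above, using a fabricated
# API payload (no network call); the runner names are illustrative only:
#
#     demo = {"runners": [{"name": "gpu-1", "status": "offline"}, {"name": "gpu-2", "status": "online"}]}
#     offline = [r for r in demo["runners"] if r["status"] == "offline"]
#     assert [r["name"] for r in offline] == ["gpu-1"]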
| 224 | 1 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
    'library. You can have a look at this example script for pointers: '
    'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, 'sklearn')
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, 'sklearn')
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, 'sklearn')
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, 'sklearn')
    assert len(preds) == len(labels), F'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}'
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, 'sklearn')
    if len(preds) != len(labels):
        raise ValueError(F'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}')
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
else:
        raise KeyError(task_name)
| 74 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace or
    control characters that the bpe code barfs on.
    """
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    """
    Return the set of symbol pairs in a word, where the word is represented as a tuple of symbols
    (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
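# For example, get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.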
class LEDTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
    def _pad( self , encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding] , max_length: Optional[int] = None , padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = """attention_mask""" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["""global_attention_mask"""] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["""global_attention_mask"""] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["""global_attention_mask"""] = (
                        encoded_inputs["""global_attention_mask"""] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["""global_attention_mask"""] = [-1] * difference + encoded_inputs[
                        """global_attention_mask"""
                    ]
                else:
                    raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
        return encoded_inputs
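# A minimal usage sketch, assuming the released LED checkpoint is available:
#
#     tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#     encoding = tokenizer("Hello world", return_tensors="pt")
#     assert tokenizer.decode(encoding["input_ids"][0], skip_special_tokens=True) == "Hello world"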
| 343 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester( unittest.TestCase ):
    def test_text_streamer_matches_non_streaming( self ):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , greedy_text)
    def test_iterator_streamer_matches_non_streaming( self ):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text , greedy_text)
    def test_text_streamer_skip_prompt( self ):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_prompt=True)
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , new_greedy_text)
    def test_text_streamer_decode_kwargs( self ):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5) , device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_special_tokens=True)
            model.generate(input_ids , max_new_tokens=1 , do_sample=False , streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1] # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text , return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
    def test_iterator_streamer_timeout( self ):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer , timeout=0.0_01)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
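# A hedged sketch of TextIteratorStreamer outside the test harness; it assumes
# `model`, `tokenizer`, and `input_ids` have been prepared as in the tests above:
#
#     from threading import Thread
#     streamer = TextIteratorStreamer(tokenizer)
#     Thread(target=model.generate, kwargs={"input_ids": input_ids, "max_new_tokens": 20, "streamer": streamer}).start()
#     for chunk in streamer:
#         print(chunk, end="", flush=True)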
| 371 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot_small-90M''': 512}
def get_pairs(word):
    """Return the set of symbol pairs in a word; the word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer( PreTrainedTokenizer ):
    """
    Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding).
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , bos_token="__start__" , eos_token="__end__" , unk_token="__unk__" , pad_token="__null__" , **kwargs , ):
        super().__init__(unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs)
        with open(vocab_file , encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges))))
        self.cache = {}
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder)
    def get_vocab( self ) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder)
    def bpe( self , token: str ) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])" , R" \1" , token)
        token = re.sub("(')" , R" \1 " , token)
        token = re.sub(R"\s{2,}" , " " , token)
        if "\n" in token:
            token = token.replace("\n" , " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first , second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first , i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize( self , text: str ) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall(R"\S+\n?" , text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id( self , token: str ) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token , self.encoder.get(self.unk_token))
    def _convert_id_to_token( self , index: int ) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index , self.unk_token)
    def convert_tokens_to_string( self , tokens: List[str] ) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ " , "").strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file , "w" , encoding="utf-8") as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file , "w" , encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
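# A minimal usage sketch, assuming the released checkpoint round-trips a
# simple lowercase string (Blenderbot-90M lowercases its input):
#
#     tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tokenizer("hello world").input_ids
#     assert tokenizer.decode(ids) == "hello world"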
| 63 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path , pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path , map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = SpeechaTextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(conv_kernel_sizes) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=200 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = SpeechaTextForConditionalGeneration(config)
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F''' but all the following weights are missing {missing}''' )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
__lowercase = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
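# A hedged CLI sketch for the converter above; the script name and both paths
# are hypothetical placeholders:
#
#     python convert_speech_to_text.py \
#         --fairseq_path /tmp/s2t_checkpoint.pt \
#         --pytorch_dump_folder_path /tmp/s2t_hf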
| 40 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig( PretrainedConfig ):
    model_type = 'longformer'
    def __init__( self , attention_window: Union[List[int], int] = 512 , sep_token_id: int = 2 , pad_token_id: int = 1 , bos_token_id: int = 0 , eos_token_id: int = 2 , vocab_size: int = 30_522 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3_072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 512 , type_vocab_size: int = 2 , initializer_range: float = 0.02 , layer_norm_eps: float = 1e-12 , onnx_export: bool = False , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig( OnnxConfig ):
    def __init__( self , config: "PretrainedConfig" , task: str = "default" , patching_specs: "List[PatchingSpec]" = None ):
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""global_attention_mask""", dynamic_axis),
            ] )
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["""pooler_output"""] = {0: """batch"""}
        return outputs
    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
    @property
    def default_onnx_opset( self ) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self , tokenizer: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["""global_attention_mask"""] = torch.zeros_like(inputs["""input_ids"""] )
        # make every second token global
        inputs["""global_attention_mask"""][:, ::2] = 1
        return inputs
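# A minimal usage sketch of the config and its ONNX wrapper; `tokenizer` is a
# hypothetical placeholder for any Longformer tokenizer instance:
#
#     config = LongformerConfig(attention_window=256)
#     onnx_config = LongformerOnnxConfig(config)
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     assert "global_attention_mask" in dummy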
| 46 | 0 |
from math import pi, sqrt, tan
def a_ ( lowerCAmelCase_ : float ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : float, lowerCAmelCase_ : float ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def a_ ( lowerCAmelCase_ : float ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def a_ ( lowerCAmelCase_ : float ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : float ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : float, lowerCAmelCase_ : float ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
__lowerCAmelCase = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : float ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : float ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(lowerCAmelCase_, 2 ) * torus_radius * tube_radius
def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : float ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def a_ ( lowerCAmelCase_ : float ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : float ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def a_ ( lowerCAmelCase_ : float, lowerCAmelCase_ : float, lowerCAmelCase_ : float ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
__lowerCAmelCase = (sidea + sidea + sidea) / 2
__lowerCAmelCase = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def area_parallelogram( base : float, height : float ):
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values' )
    return base * height
def area_trapezium( base_1 : float, base_2 : float, height : float ):
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle( radius : float ):
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values' )
    return pi * radius**2
def area_ellipse( radius_x : float, radius_y : float ):
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values' )
    return pi * radius_x * radius_y
def area_rhombus( diagonal_1 : float, diagonal_2 : float ):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon( sides : int, length : float ):
    if not isinstance(sides, int ) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
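# Sanity check for the regular-polygon formula (illustrative): with sides=4 the
# expression reduces to (4 * length**2) / (4 * tan(pi / 4)) == length**2, so
# area_reg_polygon(4, 10) agrees with area_square(10) == 100.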
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Regular Pentagon: {area_reg_polygon(5, 10) = }""")
| 368 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput( BaseOutput ):
    """simple docstring"""
    sample: torch.FloatTensor
class UNet1DModel( ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , sample_size : int = 6_5_5_3_6 , sample_rate : Optional[int] = None , in_channels : int = 2 , out_channels : int = 2 , extra_in_channels : int = 0 , time_embedding_type : str = "fourier" , flip_sin_to_cos : bool = True , use_timestep_embedding : bool = False , freq_shift : float = 0.0 , down_block_types : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type : Tuple[str] = "UNetMidBlock1D" , out_block_type : str = None , block_out_channels : Tuple[int] = (3_2, 3_2, 6_4) , act_fn : str = None , norm_num_groups : int = 8 , layers_per_block : int = 1 , downsample_each_block : bool = False , ) -> None:
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self , sample : torch.FloatTensor , timestep : Union[torch.Tensor, float, int] , return_dict : bool = True , ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(sample.device )
        timestep_embed = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed )
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample , res_samples = downsample_block(hidden_states=sample , temb=timestep_embed )
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed )
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed )
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed )
        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample )
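# Minimal usage sketch (shapes are illustrative, not a tested configuration):
# a 1D UNet maps a (batch, channels, length) sample plus a timestep to an
# output of the same shape, e.g.
#   model = UNet1DModel(in_channels=2, out_channels=2)
#   out = model(torch.randn(1, 2, 256), timestep=10).sample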
| 207 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
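# Note: the TYPE_CHECKING branch gives static type checkers a real import,
# while at runtime the module object is swapped for a _LazyModule, so the
# heavy processor import only happens when the attribute is first accessed.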
| 249 |
"""simple docstring"""
def ugly_numbers( n ):
    # merge the 2-, 3- and 5-multiples of previously found ugly numbers
    ugly_nums = [1]
    i2 , i3 , i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
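# For reference, the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...;
# ugly_numbers(10) therefore returns 12.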
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(2_0_0) = }")
| 249 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler("sample_euler" )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_2( self ):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler("sample_euler" )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
    def test_stable_diffusion_karras_sigmas( self ):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler("sample_dpmpp_2m" )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
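# These integration tests pin a seed and compare a 3x3 corner slice of the
# generated image against hard-coded values, which is far cheaper than checking
# the full 512x512 output while still catching numerical regressions.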
| 364 |
"""simple docstring"""
def solution( n = 2000000 ):
    # sieve of Eratosthenes: primality_list[i] == 0 means i is (still) prime
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
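# Example: solution(10) returns 2 + 3 + 5 + 7 = 17, the sum of primes below 10.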
if __name__ == "__main__":
print(F"{solution() = }")
| 241 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
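# Dummy-object pattern: each placeholder class below raises an informative
# error (via requires_backends) as soon as it is instantiated or loaded from a
# checkpoint without Flax installed.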
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
class UpperCamelCase__ ( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
| 224 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest( unittest.TestCase ):
    """simple docstring"""
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def testGradientAccumulator( self ):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0] )] )
        accumulator([tf.constant([-2.0, 1.0] )] )
        accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
            accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
        self.assertEqual(accumulator.step , 3 )
        self.assertEqual(len(accumulator.gradients ) , 1 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
    def testGradientAccumulatorDistributionStrategy( self ):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU' )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='CPU' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer , _ = create_optimizer(5E-5 , 1_0 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
            accumulator([gradient] )
        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
        @tf.function
        def accumulate(grad1 , grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grad1 , grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grad1 , tol=1E-2 )
            self.assertListAlmostEqual(values[1].value() , grad2 , tol=1E-2 )
        accumulate([1.0, 2.0] , [-1.0, 1.0] )
        accumulate([3.0, -1.0] , [-1.0, -1.0] )
        accumulate([-2.0, 2.0] , [3.0, -2.0] )
        self.assertEqual(accumulator.step , 3 )
        _check_local_values([2.0, 3.0] , [1.0, -2.0] )
        apply_grad()
        self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        _check_local_values([0.0, 0.0] , [0.0, 0.0] )
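# The accumulator sums per-step gradients (on the first replica above:
# [1.0, 2.0] + [3.0, -1.0] + [-2.0, 2.0] == [2.0, 3.0]) so that a single
# optimizer step can apply the combined gradient of several micro-batches.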
| 224 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d( x : float , y : float , z : float , scale : float , distance : float ) -> tuple[float, float]:
    if not all(isinstance(val , (float, int)) for val in locals().values()):
        msg = F"""Input values must either be float or int: {list(locals().values())}"""
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
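# The projection above is the standard perspective divide: screen coordinates
# shrink with depth through the distance / (z + distance) factor, scaled into
# output units by `scale`.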
def rotate( x : float , y : float , z : float , axis : str , angle : float ) -> tuple[float, float, float]:
    if not isinstance(axis , str ):
        raise TypeError('''Axis must be a str''')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int)) for val in input_variables.values()):
        msg = (
            'Input values except axis must either be float or int: '
            F"""{list(input_variables.values())}"""
        )
        raise TypeError(msg)
    angle = (angle % 3_60) / 4_50 * 1_80 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''')
    return new_x, new_y, new_z
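# Note the module's unusual angle convention: the input is rescaled as
# (angle % 360) / 450 * 180 / pi before being fed to sin/cos, so e.g.
# rotate(1.0, 2.0, 3.0, "y", 90.0) does not correspond to a 90-degree turn.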
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(F'{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }')
print(F'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }') | 357 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester( object ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_squeezebert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_inference_classification_head( self ):
        model = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1e-4 ) ) | 188 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('''fixtures''')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
SAMPLE_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')
class AutoFeatureExtractorTest( unittest.TestCase ):
    """simple docstring"""
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut( self ):
        config = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
        self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_feature_extractor_from_local_directory_from_key( self ):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_feature_extractor_from_local_directory_from_config( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR ).to_dict()
            config_dict.pop('''feature_extractor_type''' )
            config = Wav2Vec2FeatureExtractor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            config = AutoFeatureExtractor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_feature_extractor_from_local_file( self ):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG )
        self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , '''bert-base is not a local folder and is not a valid model identifier''' ):
            config = AutoFeatureExtractor.from_pretrained('''bert-base''' )
    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            config = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='''aaaaaa''' )
    def test_feature_extractor_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            config = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def test_from_pretrained_dynamic_feature_extractor( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=False )
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=True )
        self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir )
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
    def test_new_feature_extractor_registration( self ):
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , CustomFeatureExtractor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoFeatureExtractor.register(Wav2Vec2Config , Wav2Vec2FeatureExtractor )
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir )
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_feature_extractor , CustomFeatureExtractor )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict( self ):
        class NewFeatureExtractor( Wav2Vec2FeatureExtractor ):
            """simple docstring"""
            is_local = True
        try:
            AutoConfig.register('''custom''' , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , NewFeatureExtractor )
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=False )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=True )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(not hasattr(feature_extractor , '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
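# Registering a config/feature-extractor pair mutates global auto-mapping
# state, hence the try/finally blocks above that delete the custom entries so
# later tests see a clean registry.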
| 314 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    """simple docstring"""
    def get_winner( self , weights : list[list[float]] , sample : list[int] ):
        # squared Euclidean distance of the sample to each of the two weight vectors
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample ) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
            return 0 if d0 > d1 else 1
        return 0
    def update( self , weights : list[list[int | float]] , sample : list[int] , j : int , alpha : float ):
        for i in range(len(weights ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F'Clusters that the test sample belongs to : {winner}' )
    print(F'Weights that have been trained : {weights}' )
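# Each pass moves only the winning weight vector toward the sample
# (weights[j][i] += alpha * (sample[i] - weights[j][i])), i.e. the
# winner-take-all special case of Kohonen's self-organizing-map update.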
# running the main() function
if __name__ == "__main__":
main()
| 63 | 0 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down( rows , cols , mat ):
    def update_area_of_max_square(row , col ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_memoization( rows , cols , mat ):
    def update_area_of_max_square_using_dp_array(
        row , col , dp_array ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up( rows , cols , mat ):
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization( rows , cols , mat ):
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        # start a fresh row so next_row keeps the previous row's values
        # instead of aliasing the row currently being written
        next_row = current_row
        current_row = [0] * (cols + 1)
    return largest_square_area
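# The four variants above trade memory for speed: the plain recursion is
# exponential in rows + cols, the memoized version and the full DP table run
# in O(rows * cols) time with O(rows * cols) space, and the two-row version
# keeps the same time bound while cutting space to O(cols).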
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 356 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp( self ):
        self.model_tester = FlaxAlbertModelTester(self )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxAlbertModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 322 | 0 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial( num ):
    if num < 0:
        raise ValueError('Number should not be negative.' )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
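# lru_cache memoizes earlier results, so after factorial(100) a call to
# factorial(101) performs a single multiplication instead of recursing all
# the way down.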
if __name__ == "__main__":
import doctest
doctest.testmod() | 96 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester( ConfigTester ):
    """simple docstring"""
    def create_and_test_config_common_properties( self ):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config, '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config, '''num_attention_heads''' ) )
class LevitModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = LevitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for _ in range(4 ):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase__ = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = LevitModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def lowercase__ ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def lowercase__ ( self : Dict ):
'''simple docstring'''
pass
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : Tuple ):
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
lowercase__ = (self.model_tester.image_size, self.model_tester.image_size)
lowercase__ , lowercase__ = image_size[0], image_size[1]
for _ in range(4 ):
lowercase__ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowercase__ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [
height * width,
self.model_tester.hidden_sizes[0],
], )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : Any, lowerCamelCase : Any=False ):
'''simple docstring'''
lowercase__ = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
lowercase__ = model(**lowerCamelCase ).loss
loss.backward()
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ = False
lowercase__ = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase__ = model_class(lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
lowercase__ = model(**lowerCamelCase ).loss
loss.backward()
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
lowercase__ = problem_type['''title''']
lowercase__ = problem_type['''num_labels''']
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
if problem_type["num_labels"] > 1:
lowercase__ = inputs['''labels'''].unsqueeze(1 ).repeat(1, problem_type['''num_labels'''] )
lowercase__ = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase ) as warning_list:
lowercase__ = model(**lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = LevitModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def a ( ):
'''simple docstring'''
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self : int ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowercase__ = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) )
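
# Standalone sketch of the convolution arithmetic behind the shape checks in
# the tests above, assuming the tester defaults kernel_size=3, stride=2,
# padding=1: four stride-2 convolutions shrink a 64-pixel side to 4.
from math import floor

def conv_out_size(n, kernel=3, stride=2, padding=1):
    return floor((n + 2 * padding - kernel) / stride + 1)

side = 64
for _ in range(4):
    side = conv_out_size(side)
assert side == 4  # 64 -> 32 -> 16 -> 8 -> 4
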
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : torch.FloatTensor
class a ( _a , _a ):
"""simple docstring"""
@register_to_config
def __init__( self : str , snake_case : int = 32 , snake_case : int = 64 , snake_case : int = 20 , snake_case : int = 768 , snake_case : Tuple=77 , snake_case : List[Any]=4 , snake_case : float = 0.0 , snake_case : str = "silu" , snake_case : Optional[str] = None , snake_case : Optional[str] = None , snake_case : Optional[str] = "linear" , snake_case : Optional[str] = "prd" , snake_case : Optional[int] = None , snake_case : Optional[int] = None , snake_case : Optional[int] = None , ) -> List[str]:
super().__init__()
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : List[str] = attention_head_dim
__UpperCAmelCase : int = num_attention_heads * attention_head_dim
__UpperCAmelCase : List[Any] = additional_embeddings
__UpperCAmelCase : Any = time_embed_dim or inner_dim
__UpperCAmelCase : Any = embedding_proj_dim or embedding_dim
__UpperCAmelCase : Union[str, Any] = clip_embed_dim or embedding_dim
__UpperCAmelCase : List[Any] = Timesteps(snake_case , snake_case , 0 )
__UpperCAmelCase : Optional[Any] = TimestepEmbedding(snake_case , snake_case , out_dim=snake_case , act_fn=snake_case )
__UpperCAmelCase : str = nn.Linear(snake_case , snake_case )
if embedding_proj_norm_type is None:
__UpperCAmelCase : str = None
elif embedding_proj_norm_type == "layer":
__UpperCAmelCase : str = nn.LayerNorm(snake_case )
else:
raise ValueError(f'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' )
__UpperCAmelCase : List[Any] = nn.Linear(snake_case , snake_case )
if encoder_hid_proj_type is None:
__UpperCAmelCase : Union[str, Any] = None
elif encoder_hid_proj_type == "linear":
__UpperCAmelCase : Any = nn.Linear(snake_case , snake_case )
else:
raise ValueError(f'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' )
__UpperCAmelCase : Dict = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , snake_case ) )
if added_emb_type == "prd":
__UpperCAmelCase : Any = nn.Parameter(torch.zeros(1 , 1 , snake_case ) )
elif added_emb_type is None:
__UpperCAmelCase : List[Any] = None
else:
raise ValueError(
f'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' )
__UpperCAmelCase : Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
snake_case , snake_case , snake_case , dropout=snake_case , activation_fn='''gelu''' , attention_bias=snake_case , )
for d in range(snake_case )
] )
if norm_in_type == "layer":
__UpperCAmelCase : Tuple = nn.LayerNorm(snake_case )
elif norm_in_type is None:
__UpperCAmelCase : List[Any] = None
else:
raise ValueError(f'Unsupported norm_in_type: {norm_in_type}.' )
__UpperCAmelCase : Dict = nn.LayerNorm(snake_case )
__UpperCAmelCase : Any = nn.Linear(snake_case , snake_case )
__UpperCAmelCase : Any = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 )
causal_attention_mask.triu_(1 )
__UpperCAmelCase : str = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , snake_case , persistent=snake_case )
__UpperCAmelCase : Tuple = nn.Parameter(torch.zeros(1 , snake_case ) )
__UpperCAmelCase : List[str] = nn.Parameter(torch.zeros(1 , snake_case ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCamelCase__ ( self : int ) -> Dict[str, AttentionProcessor]:
__UpperCAmelCase : Optional[Any] = {}
def fn_recursive_add_processors(snake_case : str , snake_case : torch.nn.Module , snake_case : Dict[str, AttentionProcessor] ):
if hasattr(snake_case , '''set_processor''' ):
__UpperCAmelCase : Union[str, Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'{name}.{sub_name}' , snake_case , snake_case )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(snake_case , snake_case , snake_case )
return processors
def lowerCamelCase__ ( self : Optional[Any] , snake_case : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(snake_case , snake_case ) and len(snake_case ) != count:
raise ValueError(
f'A dict of processors was passed, but the number of processors {len(snake_case )} does not match the'
f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(snake_case : str , snake_case : torch.nn.Module , snake_case : int ):
if hasattr(snake_case , '''set_processor''' ):
if not isinstance(snake_case , snake_case ):
module.set_processor(snake_case )
else:
module.set_processor(processor.pop(f'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'{name}.{sub_name}' , snake_case , snake_case )
for name, module in self.named_children():
fn_recursive_attn_processor(snake_case , snake_case , snake_case )
def lowerCamelCase__ ( self : str ) -> Tuple:
self.set_attn_processor(AttnProcessor() )
def lowerCamelCase__ ( self : Optional[Any] , snake_case : List[Any] , snake_case : Union[torch.Tensor, float, int] , snake_case : torch.FloatTensor , snake_case : Optional[torch.FloatTensor] = None , snake_case : Optional[torch.BoolTensor] = None , snake_case : bool = True , ) -> List[Any]:
__UpperCAmelCase : Any = hidden_states.shape[0]
__UpperCAmelCase : Optional[int] = timestep
if not torch.is_tensor(snake_case ):
__UpperCAmelCase : str = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(snake_case ) and len(timesteps.shape ) == 0:
__UpperCAmelCase : Any = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps * torch.ones(snake_case , dtype=timesteps.dtype , device=timesteps.device )
__UpperCAmelCase : Tuple = self.time_proj(snake_case )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__UpperCAmelCase : int = timesteps_projected.to(dtype=self.dtype )
__UpperCAmelCase : Optional[int] = self.time_embedding(snake_case )
if self.embedding_proj_norm is not None:
__UpperCAmelCase : Optional[Any] = self.embedding_proj_norm(snake_case )
__UpperCAmelCase : str = self.embedding_proj(snake_case )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__UpperCAmelCase : Dict = self.encoder_hidden_states_proj(snake_case )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
__UpperCAmelCase : Optional[int] = self.proj_in(snake_case )
__UpperCAmelCase : Optional[Any] = self.positional_embedding.to(hidden_states.dtype )
__UpperCAmelCase : Union[str, Any] = []
__UpperCAmelCase : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(snake_case )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__UpperCAmelCase : Optional[int] = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__UpperCAmelCase : Union[str, Any] = hidden_states[:, None, :]
__UpperCAmelCase : Union[str, Any] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__UpperCAmelCase : Any = self.prd_embedding.to(hidden_states.dtype ).expand(snake_case , -1 , -1 )
additional_embeds.append(snake_case )
__UpperCAmelCase : Dict = torch.cat(
snake_case , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
__UpperCAmelCase : str = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__UpperCAmelCase : Union[str, Any] = F.pad(
snake_case , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__UpperCAmelCase : Optional[int] = hidden_states + positional_embeddings
if attention_mask is not None:
__UpperCAmelCase : List[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
__UpperCAmelCase : str = F.pad(snake_case , (0, self.additional_embeddings) , value=0.0 )
__UpperCAmelCase : Optional[int] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__UpperCAmelCase : Optional[int] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__UpperCAmelCase : str = self.norm_in(snake_case )
for block in self.transformer_blocks:
__UpperCAmelCase : Optional[int] = block(snake_case , attention_mask=snake_case )
__UpperCAmelCase : int = self.norm_out(snake_case )
if self.prd_embedding is not None:
__UpperCAmelCase : Optional[int] = hidden_states[:, -1]
else:
__UpperCAmelCase : List[Any] = hidden_states[:, additional_embeddings_len:]
__UpperCAmelCase : Dict = self.proj_to_clip_embeddings(snake_case )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=snake_case )
def lowerCamelCase__ ( self : Optional[int] , snake_case : List[str] ) -> str:
__UpperCAmelCase : Dict = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
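
# Standalone sketch of the additive causal mask registered in __init__ above:
# the strict upper triangle holds a large negative value so that, after
# softmax, each position puts (near-)zero weight on future tokens.
# num_tokens is a toy size chosen only for illustration.
import torch

num_tokens = 4
mask = torch.full([num_tokens, num_tokens], -10_000.0)
mask.triu_(1)  # -10000.0 strictly above the diagonal, 0.0 on and below it
probs = (torch.zeros(num_tokens, num_tokens) + mask).softmax(dim=-1)
# row i is approximately uniform over positions 0..i and ~0 elsewhere
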
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class a :
"""simple docstring"""
def __init__( self : List[str] , snake_case : Any , snake_case : Tuple=13 , snake_case : Any=10 , snake_case : Any=3 , snake_case : Dict=2 , snake_case : Optional[Any]=2 , snake_case : Union[str, Any]=True , snake_case : Dict=True , snake_case : List[Any]=32 , snake_case : Dict=5 , snake_case : List[str]=4 , snake_case : Dict=37 , snake_case : Any="gelu" , snake_case : Optional[int]=0.1 , snake_case : Union[str, Any]=0.1 , snake_case : Optional[int]=10 , snake_case : Dict=0.02 , snake_case : Tuple="divided_space_time" , snake_case : List[Any]=None , ) -> Optional[int]:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_size
__UpperCAmelCase : List[str] = num_frames
__UpperCAmelCase : Union[str, Any] = is_training
__UpperCAmelCase : str = use_labels
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : Any = attention_type
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : str = scope
__UpperCAmelCase : List[str] = num_labels
        # in TimeSformer, the sequence length equals num_frames * num_patches_per_frame + 1 (the CLS token)
__UpperCAmelCase : str = (image_size // patch_size) ** 2
__UpperCAmelCase : int = (num_frames) * self.num_patches_per_frame + 1
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
__UpperCAmelCase : List[str] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Dict = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : str = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__UpperCAmelCase : Optional[int] = self.num_labels
return config
def lowerCamelCase__ ( self : Dict , snake_case : Any , snake_case : Optional[int] , snake_case : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : List[Any] = TimesformerModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Tuple = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : int , snake_case : Tuple , snake_case : List[Any] , snake_case : Optional[Any] ) -> str:
__UpperCAmelCase : Union[str, Any] = TimesformerForVideoClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case )
# verify the logits shape
__UpperCAmelCase : List[str] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , snake_case )
def lowerCamelCase__ ( self : Any ) -> List[str]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
def lowerCamelCase__ ( self : int ) -> str:
__UpperCAmelCase : Tuple = TimesformerModelTester(self )
__UpperCAmelCase : str = ConfigTester(
self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Dict , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Optional[int]=False ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = copy.deepcopy(snake_case )
if return_labels:
if model_class in get_values(snake_case ):
__UpperCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def lowerCamelCase__ ( self : Any ) -> Dict:
pass
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(snake_case )
__UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : int = [*signature.parameters.keys()]
__UpperCAmelCase : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase__ ( self : Tuple ) -> Dict:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*snake_case )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = TimesformerModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
if not self.has_attentions:
pass
else:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = True
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = self.model_tester.seq_length
__UpperCAmelCase : int = self.model_tester.num_frames
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Tuple = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : int = model(**self._prepare_for_class(snake_case , snake_case ) )
__UpperCAmelCase : str = outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : Dict = True
__UpperCAmelCase : str = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
__UpperCAmelCase : List[Any] = outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__UpperCAmelCase : Tuple = len(snake_case )
# Check attention is always last and order is fine
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Union[str, Any] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Any = model(**self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(out_len + 1 , len(snake_case ) )
__UpperCAmelCase : Any = outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
def check_hidden_states_output(snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Tuple ):
__UpperCAmelCase : str = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Any = model(**self._prepare_for_class(snake_case , snake_case ) )
__UpperCAmelCase : int = outputs.hidden_states
__UpperCAmelCase : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(snake_case ) , snake_case )
__UpperCAmelCase : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : str = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def _a ( ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
__UpperCAmelCase : int = np.load(_lowercase )
return list(_lowercase )
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self : str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
snake_case )
__UpperCAmelCase : str = self.default_image_processor
__UpperCAmelCase : Dict = prepare_video()
__UpperCAmelCase : Union[str, Any] = image_processor(video[:8] , return_tensors='''pt''' ).to(snake_case )
# forward pass
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**snake_case )
# verify the logits
__UpperCAmelCase : Optional[Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , snake_case )
__UpperCAmelCase : List[Any] = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(snake_case )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
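
# Quick arithmetic check for the sequence length used throughout the tests
# above, with the tester defaults (image_size=10, patch_size=2, num_frames=2):
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 25
seq_length = num_frames * num_patches_per_frame + 1  # 2 * 25 + 1 = 51
assert (num_patches_per_frame, seq_length) == (25, 51)
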
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__UpperCAmelCase = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__UpperCAmelCase = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__UpperCAmelCase = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
"""simple docstring"""
def _UpperCamelCase ( self ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def _UpperCamelCase ( self , _A , _A , _A = 1 , _A = 4 , ) -> Optional[Any]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a_ , hypotheses=a_ , min_len=a_ , max_len=a_ )
}
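
# A from-scratch sketch of the GLEU definition quoted in _DESCRIPTION above
# (the minimum of n-gram recall and precision); this toy helper is for
# illustration only -- the metric itself delegates to nltk's corpus_gleu.
from collections import Counter

def _ngram_counts(tokens, min_len=1, max_len=4):
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts

def sentence_gleu_sketch(reference, hypothesis, min_len=1, max_len=4):
    ref = _ngram_counts(reference, min_len, max_len)
    hyp = _ngram_counts(hypothesis, min_len, max_len)
    overlap = sum((ref & hyp).values())  # clipped n-gram matches
    recall = overlap / max(sum(ref.values()), 1)
    precision = overlap / max(sum(hyp.values()), 1)
    return min(recall, precision)
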
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase ( A__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , a_ : int , a_ : Optional[int]=13 , a_ : Optional[Any]=7 , a_ : Tuple=True , a_ : Optional[int]=True , a_ : List[str]=True , a_ : Union[str, Any]=True , a_ : List[Any]=99 , a_ : List[Any]=32 , a_ : Dict=5 , a_ : Tuple=4 , a_ : Any=37 , a_ : int="gelu" , a_ : Any=0.1 , a_ : Union[str, Any]=0.1 , a_ : Dict=5_12 , a_ : Union[str, Any]=16 , a_ : Optional[int]=2 , a_ : Dict=0.02 , a_ : List[str]=False , a_ : str=True , a_ : Any="None" , a_ : Dict=3 , a_ : List[str]=4 , a_ : Optional[Any]=None , ):
lowerCAmelCase_ : str = parent
lowerCAmelCase_ : Optional[Any] = batch_size
lowerCAmelCase_ : Any = seq_length
lowerCAmelCase_ : int = is_training
lowerCAmelCase_ : List[Any] = use_input_mask
lowerCAmelCase_ : str = use_token_type_ids
lowerCAmelCase_ : Dict = use_labels
lowerCAmelCase_ : Optional[Any] = vocab_size
lowerCAmelCase_ : List[str] = hidden_size
lowerCAmelCase_ : Optional[Any] = num_hidden_layers
lowerCAmelCase_ : Optional[int] = num_attention_heads
lowerCAmelCase_ : Optional[Any] = intermediate_size
lowerCAmelCase_ : List[Any] = hidden_act
lowerCAmelCase_ : List[str] = hidden_dropout_prob
lowerCAmelCase_ : Tuple = attention_probs_dropout_prob
lowerCAmelCase_ : int = max_position_embeddings
lowerCAmelCase_ : Any = type_vocab_size
lowerCAmelCase_ : Dict = type_sequence_label_size
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : Optional[Any] = num_labels
lowerCAmelCase_ : List[Any] = num_choices
lowerCAmelCase_ : Optional[Any] = relative_attention
lowerCAmelCase_ : Optional[int] = position_biased_input
lowerCAmelCase_ : Union[str, Any] = pos_att_type
lowerCAmelCase_ : Tuple = scope
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : str = None
if self.use_input_mask:
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCAmelCase_ : int = None
if self.use_token_type_ids:
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Dict = None
if self.use_labels:
lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self : int ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = self.get_config()
lowerCAmelCase_ : Tuple = 3_00
return config
def lowerCamelCase ( self : List[Any] , a_ : Optional[Any] ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowerCamelCase ( self : Optional[Any] , a_ : List[str] , a_ : Union[str, Any] , a_ : Dict , a_ : str , a_ : int , a_ : Any , a_ : Tuple ):
lowerCAmelCase_ : Union[str, Any] = DebertaModel(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : str = model(a_ , attention_mask=a_ , token_type_ids=a_ )[0]
lowerCAmelCase_ : List[str] = model(a_ , token_type_ids=a_ )[0]
lowerCAmelCase_ : Optional[Any] = model(a_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowerCamelCase ( self : Optional[Any] , a_ : Optional[int] , a_ : int , a_ : List[str] , a_ : int , a_ : Tuple , a_ : int , a_ : List[Any] ):
lowerCAmelCase_ : List[Any] = DebertaForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Dict = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self : str , a_ : List[str] , a_ : Tuple , a_ : Any , a_ : str , a_ : List[Any] , a_ : Tuple , a_ : Union[str, Any] ):
lowerCAmelCase_ : str = self.num_labels
lowerCAmelCase_ : List[str] = DebertaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : str = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a_ )
def lowerCamelCase ( self : Any , a_ : Union[str, Any] , a_ : Any , a_ : str , a_ : int , a_ : Dict , a_ : int , a_ : Tuple ):
lowerCAmelCase_ : int = self.num_labels
lowerCAmelCase_ : Any = DebertaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : int = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self : Dict , a_ : Dict , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : List[Any] , a_ : int , a_ : str ):
lowerCAmelCase_ : Optional[int] = DebertaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Tuple = model(
a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self : str ):
lowerCAmelCase_ : Tuple = self.prepare_config_and_inputs()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
a_ : int = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : Dict = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : List[Any] = True
a_ : Dict = False
a_ : int = False
a_ : str = False
a_ : List[Any] = False
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Union[str, Any] = DebertaModelTester(self )
lowerCAmelCase_ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def lowerCamelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a_ )
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a_ )
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a_ )
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a_ )
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a_ )
@slow
def lowerCamelCase ( self : Optional[Any] ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Union[str, Any] = DebertaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="Model not available yet" )
def lowerCamelCase ( self : Union[str, Any] ):
pass
@slow
def lowerCamelCase ( self : str ):
lowerCAmelCase_ : int = DebertaModel.from_pretrained("microsoft/deberta-base" )
lowerCAmelCase_ : str = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
lowerCAmelCase_ : Dict = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ : Dict = model(a_ , attention_mask=a_ )[0]
# compare the actual values for a slice.
lowerCAmelCase_ : Optional[Any] = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1e-4 ) , f'''{output[:, 1:4, 1:4]}''' )
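
# A hedged end-to-end usage sketch for the classes exercised above; the
# checkpoint name is the one used by the integration test, while num_labels
# and the input sentence are illustrative choices.
#
#   from transformers import DebertaForSequenceClassification, DebertaTokenizer
#   tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
#   model = DebertaForSequenceClassification.from_pretrained("microsoft/deberta-base", num_labels=2)
#   batch = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
#   logits = model(**batch).logits  # shape: (1, 2)
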
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
__a = _symbol_database.Default()
__a = _descriptor_pool.Default().AddSerializedFile(
    B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. '
    B'\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
__a = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
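# Example (a minimal sketch, not part of the generated file): once the
# descriptors above are built, the generated `ModelProto` class can be used to
# inspect a serialized SentencePiece model. The path "spm.model" is a placeholder.
#
#   from sentencepiece_model_pb2 import ModelProto
#
#   m = ModelProto()
#   with open("spm.model", "rb") as f:
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.model_type, m.trainer_spec.vocab_size)
#   print(m.pieces[0].piece, m.pieces[0].score)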
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
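# A short sketch of what the `_LazyModule` registration above buys (assuming the
# usual `transformers` package layout): submodules listed in `_import_structure`
# are only imported when an attribute is first accessed, so
#
#   from transformers.models.bloom import BloomConfig   # cheap: config module only
#   from transformers.models.bloom import BloomModel    # triggers the torch import
#
# both work, but only the second line pays the cost of importing `modeling_bloom`
# (and therefore torch).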
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256,
        share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm", hidden_act="gelu_python",
        hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02,
        layer_norm_eps=1e-7, feature_layer_norm_eps=1e-5,
        feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        ctc_loss_reduction="mean", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256,
        pad_token_id=0, bos_token_id=1, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        # total stride of the convolutional feature encoder
        return functools.reduce(operator.mul, self.conv_stride, 1)
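# Usage sketch (hedged: assumes a `transformers` install where this config class
# is importable). The defaults above roughly correspond to the
# asapp/sew-d-tiny-100k checkpoint, and the property gives the total stride of
# the convolutional feature encoder:
#
#   config = SEWDConfig()
#   # product of conv_stride = 5 * 2**6 = 320 input samples per output frame
#   assert config.inputs_to_logits_ratio == 320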
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
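# A sketch of the same availability-guard pattern for downstream code (the
# helper names come from `transformers.utils`):
#
#   from transformers.utils import is_tf_available, is_torch_available
#
#   if is_torch_available():
#       from transformers import DebertaModel       # PyTorch implementation
#   elif is_tf_available():
#       from transformers import TFDebertaModel     # TensorFlow implementation
#   else:
#       raise ImportError("DeBERTa requires either PyTorch or TensorFlow.")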
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
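# For example (a sketch -- the file name is a placeholder and the exact flags
# depend on your hardware and `accelerate config`):
#
#   python ./nlp_example.py                                    # single CPU or GPU
#   accelerate launch --mixed_precision fp16 ./nlp_example.py  # mixed precision
#   accelerate launch --num_processes 2 ./nlp_example.py       # 2-process data parallel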
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue`/`mrpc` dataset, using
    "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
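    # Worked example of the arithmetic above (illustration only): with
    # config["batch_size"] = 64 and MAX_GPU_BATCH_SIZE = 16, we get
    # gradient_accumulation_steps = 64 // 16 = 4 and a per-step batch of 16,
    # so the effective batch size seen by the optimizer stays 4 * 16 = 64.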
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
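# To run just this suite (a sketch; the path assumes the usual transformers
# repository layout, and RUN_SLOW gates the @slow tests):
#
#   python -m pytest tests/models/openai/test_modeling_openai.py -k "openai_gpt" -v
#   RUN_SLOW=1 python -m pytest tests/models/openai/test_modeling_openai.py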
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    """
    Get image paths and annotations from the input directories, flip them,
    and save the new images and annotations in the output directory.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """
    Collect image paths and their YOLO-format annotations from the input directories.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """
    Flip each image (1 = horizontal, 0 = vertical) and mirror its bounding boxes.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    """
    Create a random string of lowercase letters and digits.
    """
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
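# Worked example of the bbox arithmetic in `update_image_and_anno` (YOLO format
# stores centres as fractions of the image size, so no pixel dimensions are
# needed). A horizontal flip maps x_center -> 1 - x_center:
#
#   bbox = [0, 0.25, 0.40, 0.10, 0.20]           # [class, x, y, w, h]
#   flipped = [bbox[0], 1 - bbox[1], *bbox[2:]]  # -> [0, 0.75, 0.40, 0.10, 0.20]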
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True,
        sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2,
        vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4,
        summary_type="last", use_proj=True, scope=None, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()

        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail -> change it to 0
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
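# Example of the IGNORE_RESULT flag registered above in action (a sketch with a
# hypothetical function): the expected-output line is accepted no matter what the
# call actually returns.
#
#   >>> train_tiny_model()  # doctest: +IGNORE_RESULT
#   {'loss': 0.123}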
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
        bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        add_prefix_space=False, trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors,
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            unk_token=unk_token, pad_token=pad_token, mask_token=mask_token,
            add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
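    # Worked example (toy ids, not from the source): create_token_type_ids_from_sequences([10, 11])
    # with no second sequence returns [0, 0, 0, 0] -- one zero per position in <s> + ids + </s>,
    # since BART does not make use of token type ids and this method always returns zeros.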
| 13 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = '''Create a default config file for Accelerate with only a few flags set.'''
def write_basic_config ( mixed_precision : str = "no" , save_location : str = default_json_config_file , use_xpu : bool = False ):
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
    if path.exists():
        print(
            F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        num_gpus = 0
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
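# Minimal usage sketch (flag value chosen for illustration, not taken from the source):
#   write_basic_config(mixed_precision="bf16")  # writes the default JSON config and returns its Path,
#                                               # or returns False if the file already exists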
def default_command_parser ( parser , parents ):
    parser = parser.add_parser('default' , parents=parents , help=description , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '--config_file' , default=default_json_config_file , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=str , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    parser.set_defaults(func=default_config_command )
    return parser
def default_config_command ( args ):
    config_file = write_basic_config(args.mixed_precision , args.save_location )
    if config_file:
        print(F'accelerate configuration saved at {config_file}' )
| 240 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main ():
    parser = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 240 | 1 |
from __future__ import annotations
def snake_case (stress : float , tangential_force : float , area : float , ) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
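# Worked example (numbers chosen for illustration): exactly one of the three inputs must be 0;
# with stress=0, tangential_force=100 and area=20 the function solves for the missing quantity
# and returns ("stress", 5.0), because stress = tangential_force / area.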
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 292 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def _a ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase_: int = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCamelCase_: Optional[Any] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
UpperCamelCase_: Any = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCamelCase_: Tuple = {'unk_token': '<unk>'}
UpperCamelCase_: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase_: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_lowerCamelCase ) )
def _a ( self , **_lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self , **_lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def _a ( self , _lowerCamelCase ):
UpperCamelCase_: Union[str, Any] = 'lower newer'
UpperCamelCase_: Optional[Any] = 'lower newer'
return input_text, output_text
def _a ( self ):
UpperCamelCase_: int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase_: Any = 'lower newer'
UpperCamelCase_: Any = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
UpperCamelCase_: str = tokenizer.tokenize(_lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Dict = tokens + [tokenizer.unk_token]
UpperCamelCase_: int = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
def _a ( self ):
UpperCamelCase_: Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_lowerCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_lowerCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def _a ( self ):
UpperCamelCase_: int = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
UpperCamelCase_: Dict = tokenizer.encode('sequence builders' , add_special_tokens=_lowerCamelCase )
UpperCamelCase_: Any = tokenizer.encode('multi-sequence build' , add_special_tokens=_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = tokenizer.encode(
'sequence builders' , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase )
UpperCamelCase_: Any = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase )
UpperCamelCase_: int = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase )
UpperCamelCase_: Tuple = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _a ( self ):
UpperCamelCase_: Optional[int] = self.get_tokenizer()
UpperCamelCase_: Optional[int] = 'Encode this sequence.'
UpperCamelCase_: List[Any] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
UpperCamelCase_: Dict = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase )
UpperCamelCase_: List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: List[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase )
UpperCamelCase_: int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
UpperCamelCase_: Optional[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
UpperCamelCase_: Any = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_lowerCamelCase , _lowerCamelCase )
# Testing spaces after special tokens
UpperCamelCase_: List[Any] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase )} ) # mask token has a left space
UpperCamelCase_: List[Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
UpperCamelCase_: Dict = 'Encode <mask> sequence'
UpperCamelCase_: Dict = 'Encode <mask>sequence'
UpperCamelCase_: Any = tokenizer.encode(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = encoded.index(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: List[str] = tokenizer.encode(_lowerCamelCase )
UpperCamelCase_: List[Any] = encoded.index(_lowerCamelCase )
UpperCamelCase_: Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_lowerCamelCase , _lowerCamelCase )
def _a ( self ):
pass
def _a ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase_: Any = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_: Tuple = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
UpperCamelCase_: List[Any] = 'A, <mask> AllenNLP sentence.'
UpperCamelCase_: int = tokenizer_r.encode_plus(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase )
UpperCamelCase_: Any = tokenizer_p.encode_plus(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
UpperCamelCase_: List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
UpperCamelCase_: str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
_lowerCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_lowerCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _a ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCamelCase_: Optional[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCamelCase_: Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _lowerCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , _lowerCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] , _lowerCamelCase )
def _a ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase_: Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase_: Union[str, Any] = f'''{text_of_1_token} {text_of_1_token}'''
UpperCamelCase_: Optional[int] = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase )
UpperCamelCase_: str = tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCamelCase ) + 1, len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
UpperCamelCase_: Optional[int] = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase )
UpperCamelCase_: str = tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCamelCase ) + 1, len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
UpperCamelCase_: Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase )
UpperCamelCase_: Dict = tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCamelCase ), len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
UpperCamelCase_: List[str] = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase )
UpperCamelCase_: Optional[Any] = tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowerCamelCase ), len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
UpperCamelCase_: Optional[int] = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCamelCase_: Optional[int] = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase )
UpperCamelCase_: Tuple = tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowerCamelCase ) + 1, 1 + len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
UpperCamelCase_: Dict = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase )
UpperCamelCase_: Dict = tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowerCamelCase ), 1 + len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
UpperCamelCase_: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase )
UpperCamelCase_: Union[str, Any] = tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowerCamelCase )) )
self.assertEqual(
                encoding.offset_mapping[1] , (1 + len(_lowerCamelCase ), 1 + len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
| 292 | 1 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time ( t ):
    t = int(t )
    h , m , s = t // 36_00, (t // 60) % 60, t % 60
    return F"""{h}:{m:02d}:{s:02d}""" if h != 0 else F"""{m:02d}:{s:02d}"""
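# Quick sanity values (hand-checked against the formula above):
#   format_time(3661) -> "1:01:01"    format_time(75) -> "01:15"  (the hour field is dropped when zero)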
def html_progress_bar ( value , total , prefix , label , width=3_00 ):
return F"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def text_to_html_table ( items ):
    html_code = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = F"""{elt:.6f}""" if isinstance(elt , float ) else str(elt )
html_code += F""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
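# Illustrative call (data made up): text_to_html_table([["Step", "Loss"], [10, 0.5]]) renders a
# header row "Step | Loss" and one body row "10 | 0.500000" -- floats are formatted to six
# decimals, everything else is stringified.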
class NotebookProgressBar :
    warmup = 5
    update_every = 0.2
    def __init__( self , total , prefix=None , leave=True , parent=None , width=300 , ):
        '''simple docstring'''
        self.total = total
        self.prefix = '''''' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update( self , value , force_update=False , comment=None ):
        '''simple docstring'''
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value )
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item ) , 1 )
    def update_bar( self , value , comment=None ):
        '''simple docstring'''
        spaced_value = ''' ''' * (len(str(self.total ) ) - len(str(value ) )) + str(value )
        if self.elapsed_time is None:
            self.label = F"""[{spaced_value}/{self.total} : < :"""
        elif self.predicted_remaining is None:
            self.label = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
        else:
            self.label = (
                F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
                F""" {format_time(self.predicted_remaining )}"""
            )
            self.label += F""", {1/self.average_time_per_item:.2f} it/s"""
        self.label += "]" if self.comment is None or len(self.comment ) == 0 else F""", {self.comment}]"""
        self.display()
    def display( self ):
        '''simple docstring'''
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def close( self ):
        '''simple docstring'''
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML('''''' ) )
class NotebookTrainingTracker ( NotebookProgressBar ):
    def __init__( self , num_steps , column_names=None ):
        '''simple docstring'''
        super().__init__(num_steps )
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None
    def display( self ):
        '''simple docstring'''
        self.html_code = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table )
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code ) , display_id=True )
        else:
            self.output.update(disp.HTML(self.html_code ) )
    def write_line( self , values ):
        '''simple docstring'''
        if self.inner_table is None:
            self.inner_table = [list(values.keys() ), list(values.values() )]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table ) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key )
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns] )
    def add_child( self , total , prefix=None , width=300 ):
        '''simple docstring'''
        self.child_bar = NotebookProgressBar(total , prefix=prefix , parent=self , width=width )
        return self.child_bar
    def remove_child( self ):
        '''simple docstring'''
        self.child_bar = None
        self.display()
class NotebookProgressCallback ( TrainerCallback ):
    def __init__( self ):
        '''simple docstring'''
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.first_column = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['''Training Loss''']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('''Validation Loss''' )
        self.training_tracker = NotebookTrainingTracker(state.max_steps , column_names )
    def on_step_end( self , args , state , control , **kwargs ):
        '''simple docstring'''
        epoch = int(state.epoch ) if int(state.epoch ) == state.epoch else F"""{state.epoch:.2f}"""
        self.training_tracker.update(
            state.global_step + 1 , comment=F"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
        self._force_next_update = False
    def on_prediction_step( self , args , state , control , eval_dataloader=None , **kwargs ):
        '''simple docstring'''
        if not has_length(eval_dataloader ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader ) )
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )
    def on_predict( self , args , state , control , **kwargs ):
        '''simple docstring'''
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log( self , args , state , control , logs=None , **kwargs ):
        '''simple docstring'''
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily "Step" since we're not in epoch eval strategy
            values['''Step'''] = state.global_step
            self.training_tracker.write_line(values )
    def on_evaluate( self , args , state , control , metrics=None , **kwargs ):
        '''simple docstring'''
        if self.training_tracker is not None:
            values = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    values['''Training Loss'''] = log['''loss''']
                    break
            if self.first_column == "Epoch":
                values['''Epoch'''] = int(state.epoch )
            else:
                values['''Step'''] = state.global_step
            metric_key_prefix = '''eval'''
            for k in metrics:
                if k.endswith('''_loss''' ):
                    metric_key_prefix = re.sub(r'''\_loss$''' , '''''' , k )
            _ = metrics.pop('''total_flos''' , None )
            _ = metrics.pop('''epoch''' , None )
            _ = metrics.pop(F"""{metric_key_prefix}_runtime""" , None )
            _ = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" , None )
            _ = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" , None )
            _ = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" , None )
            for k, v in metrics.items():
                if k == F"""{metric_key_prefix}_loss""":
                    values['''Validation Loss'''] = v
                else:
                    splits = k.split('''_''' )
                    name = ''' '''.join([part.capitalize() for part in splits[1:]] )
                    values[name] = v
            self.training_tracker.write_line(values )
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end( self , args , state , control , **kwargs ):
        '''simple docstring'''
        self.training_tracker.update(
            state.global_step , comment=F"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=True )
        self.training_tracker = None
| 330 |
__version__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 235 | 0 |
'''simple docstring'''
def depth_first_search ( grid : list[list[int]] , row : int , col : int , visit : set ):
    '''simple docstring'''
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
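# Quick check (toy grid, not from the source): an open 2x2 grid of zeros has exactly two
# simple paths from the top-left to the bottom-right corner, so
#   depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2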
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = ShapEPipeline
lowerCAmelCase_ : List[str] = ["""prompt"""]
lowerCAmelCase_ : Union[str, Any] = ["""prompt"""]
lowerCAmelCase_ : str = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
lowerCAmelCase_ : int = False
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
return 8
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCAmelCase__ = PriorTransformer(**_UpperCAmelCase )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase__ = ShapERenderer(**_UpperCAmelCase )
return model
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = self.dummy_prior
UpperCAmelCase__ = self.dummy_text_encoder
UpperCAmelCase__ = self.dummy_tokenizer
UpperCAmelCase__ = self.dummy_renderer
UpperCAmelCase__ = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
UpperCAmelCase__ = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any]=0 ):
"""simple docstring"""
if str(_UpperCAmelCase ).startswith("""mps""" ):
UpperCAmelCase__ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCAmelCase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCAmelCase__ = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = """cpu"""
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**_UpperCAmelCase )
UpperCAmelCase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase__ = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
UpperCAmelCase__ = output.images[0]
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase__ = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = torch_device == """cpu"""
UpperCAmelCase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**_UpperCAmelCase )
UpperCAmelCase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase__ = batch_size * [inputs[key]]
UpperCAmelCase__ = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
UpperCAmelCase__ = ShapEPipeline.from_pretrained("""openai/shap-e""" )
UpperCAmelCase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
UpperCAmelCase__ = pipe(
"""a shark""" , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 61 | 0 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_lowerCAmelCase : int = logging.get_logger(__name__)
class ImageGPTFeatureExtractor ( ImageGPTImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 169 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
@require_torch
def UpperCAmelCase_ ( self :int ) -> Optional[Any]:
UpperCAmelCase__ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase__ = image_classifier(lowerCamelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(lowerCamelCase ) , [
[{"score": 0.3_33, "label": "a"}, {"score": 0.3_33, "label": "b"}, {"score": 0.3_33, "label": "c"}],
[{"score": 0.3_33, "label": "a"}, {"score": 0.3_33, "label": "c"}, {"score": 0.3_33, "label": "b"}],
] , )
UpperCAmelCase__ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
] , )
@require_tf
def UpperCAmelCase_ ( self :List[str] ) -> Optional[int]:
UpperCAmelCase__ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase__ = image_classifier(lowerCamelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [{"score": 0.3_33, "label": "a"}, {"score": 0.3_33, "label": "b"}, {"score": 0.3_33, "label": "c"}] , )
UpperCAmelCase__ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
] , )
@slow
@require_torch
def UpperCAmelCase_ ( self :str ) -> Dict:
UpperCAmelCase__ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase__ = image_classifier(lowerCamelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
{"score": 0.5_11, "label": "remote"},
{"score": 0.4_85, "label": "cat"},
{"score": 0.0_04, "label": "plane"},
] , )
UpperCAmelCase__ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[
{"score": 0.5_11, "label": "remote"},
{"score": 0.4_85, "label": "cat"},
{"score": 0.0_04, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def UpperCAmelCase_ ( self :List[Any] ) -> List[str]:
UpperCAmelCase__ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase__ = image_classifier(lowerCamelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
{"score": 0.5_11, "label": "remote"},
{"score": 0.4_85, "label": "cat"},
{"score": 0.0_04, "label": "plane"},
] , )
UpperCAmelCase__ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[
{"score": 0.5_11, "label": "remote"},
{"score": 0.4_85, "label": "cat"},
{"score": 0.0_04, "label": "plane"},
],
]
* 5 , )
| 169 | 1 |
def solution( length: int = 50 ) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
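# Sanity values (hand-computed from the recurrence above): solution(4) == 8 and solution(5) == 15,
# matching the tetranacci-style relation f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4).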
if __name__ == "__main__":
print(F"""{solution() = }""")
| 356 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_squeezebert_fast'''] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_squeezebert'''] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
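# Behavioral note (based on the standard _LazyModule contract rather than anything else in this
# file): replacing sys.modules[__name__] keeps plain imports cheap -- the torch-backed modeling
# module is only imported the first time one of its symbols is actually accessed.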
| 335 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure ( config ):
    config.addinivalue_line(
        "markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" )
    config.addinivalue_line(
        "markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" )
    config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested" )
    config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment" )
    config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate" )
    config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule" )
def pytest_addoption ( parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish ( session , exitstatus ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    """simple docstring"""
    def check_output( self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 13 |
def A_ ( input_str , use_pascal = False ):
    if not isinstance(input_str , str ):
        msg = f"Expected string as input, found {type(input_str )}"
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(msg )
    words = input_str.split("_" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
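# e.g. A_("hello_world_example") -> "helloWorldExample", while A_("hello_world", use_pascal=True)
# -> "HelloWorld" (with use_pascal the first word is capitalized too).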
if __name__ == "__main__":
from doctest import testmod
testmod()
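    # Quick demo of both conversion modes (expected output noted inline):
    print(snake_to_camel_case("some_random_string"))  # someRandomString
    print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString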
| 13 | 1 |
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
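# Worked example from the problem statement: "SKY" -> 19 + 11 + 25 = 55, and
# 55 = 0.5 * 10 * 11 is the 10th triangular number, so "SKY" is a triangle word
# and contributes to the count returned by solution().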
| 362 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})
        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})
    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})
    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})
    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})
    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})
        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})
    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})
    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
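# A minimal standalone sketch (not part of the original test suite): driving
# `evaluate` directly with the same toy tool used by the tests above.
if __name__ == "__main__":
    demo_state = {"x": 3}
    print(evaluate("y = add_two(x)", {"add_two": add_two}, state=demo_state))  # 5
    print(demo_state)  # {'x': 3, 'y': 5}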
| 305 | 0 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.
    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsWarper:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index
    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))
        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )
        scores = jax.vmap(handle_pairs)(input_ids, scores)
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)
        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )
        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
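# A minimal sketch (synthetic shapes, not from the original module) showing how
# the classes above are meant to be chained via FlaxLogitsProcessorList:
if __name__ == "__main__":
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
    )
    dummy_input_ids = jnp.zeros((2, 4), dtype=jnp.int32)
    dummy_scores = jnp.zeros((2, 100))
    processed = processors(dummy_input_ids, dummy_scores, cur_len=4)
    print(processed.shape)  # (2, 100)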
| 292 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    # a graph is bipartite iff no edge connects two vertices of the same color
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
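# Counterexample: an odd cycle (a triangle) cannot be two-colored.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False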
| 292 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"
    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
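# A minimal sketch (assumes transformers is installed): the default config
# falls back to a ResNet backbone, which to_dict() serializes alongside it.
if __name__ == "__main__":
    config = UperNetConfig()
    as_dict = config.to_dict()
    print(as_dict["model_type"], as_dict["backbone_config"]["model_type"])  # upernet resnet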
| 30 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
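    # Example invocation (placeholder paths; the script name is whatever this
    # file is saved as):
    #   python convert_m2m100_checkpoint.py /path/to/model.pt ./converted_model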
| 294 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class A_ :
'''simple docstring'''
pass
| 61 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
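# A minimal sketch (synthetic image, not from the original file) of the full
# resize -> crop -> rescale -> normalize pipeline defined above:
if __name__ == "__main__":
    processor = CLIPImageProcessor()
    dummy_image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    batch = processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)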
| 90 |
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
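    # Worked example: gamma(3.5) expands recursively to
    # 2.5 * 1.5 * 0.5 * sqrt(pi) ≈ 3.3234
    print(f"gamma(3.5) = {gamma(3.5)}")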
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(F"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 90 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase : str = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        # Computes the (height, width) the image processor should produce, given
        # inputs that may be PIL images or (C, H, W) arrays/tensors.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 335 | 0 |
'''simple docstring'''
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None
    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        # accept a bare vertex index as well as a list of indices
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True
    # You should override this method in subclasses
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1
    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self):
        # the source sits at maximum height so its outgoing edges push first
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)
    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
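    # For the 4-node cycle above (0 -> 1 -> 2 -> 3), the bottleneck is the edge
    # 1 -> 2 with capacity 6, so this prints: maximum flow is 6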
| 236 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def A__ ( ):
_UpperCamelCase : str = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
_UpperCamelCase : Any = bs[:]
_UpperCamelCase : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCAmelCase_ )
cs.append(2**8 + n )
n += 1
_UpperCamelCase : Any = [chr(UpperCAmelCase_ ) for n in cs]
return dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) )
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase : Tuple = set()
_UpperCamelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCamelCase : Any = char
return pairs
class BartTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    def vocab_size(self ):
        return len(self.encoder )
    def get_vocab(self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
    def _tokenize(self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
    def _convert_token_to_id(self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string(self , tokens ):
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization(self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
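# A minimal round-trip sketch of the byte-level helpers above (standard library
# only; `bytes_to_unicode` and `get_pairs` are the module functions defined here):
if __name__ == "__main__":
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    mapped = ''.join(byte_encoder[b] for b in 'hello world'.encode('utf-8' ) )
    # every byte maps to a printable character and the mapping is invertible
    assert bytearray(byte_decoder[c] for c in mapped ).decode('utf-8' ) == 'hello world'
    # adjacent-pair extraction used by the BPE merge loop
    print(get_pairs(('h', 'e', 'l', 'l', 'o') ) )  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}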
| 236 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig ):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }
    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
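# A minimal usage sketch of the config above (uses the class defined in this
# file; equivalently importable as `transformers.TrOCRConfig`):
if __name__ == "__main__":
    config = TrOCRConfig(d_model=256 , decoder_layers=2 , decoder_attention_heads=4 )
    # attribute_map exposes decoder fields under the generic names used across the library
    assert config.hidden_size == config.d_model
    assert config.num_hidden_layers == config.decoder_layers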
| 27 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase__ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : Dict , **_UpperCAmelCase : Any ) -> Optional[Any]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] , **_UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] , **_UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
        lowercase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowercase__ = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase__ (self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase )
def lowerCamelCase__ (self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
lowercase__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(_UpperCAmelCase , return_tensors="""np""" )
lowercase__ = processor(images=_UpperCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = processor(text=_UpperCAmelCase )
lowercase__ = tokenizer(_UpperCAmelCase , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ (self : List[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def lowerCamelCase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.batch_decode(_UpperCAmelCase )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : List[str] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = AlignProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
lowercase__ = """lower newer"""
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
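# The suite above follows the standard unittest protocol, so it can be run
# directly (assumes the vision extras, e.g. `pip install transformers[vision]`):
if __name__ == "__main__":
    unittest.main()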
| 305 | 0 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
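# A minimal usage sketch (an illustration; assumes network access to the
# checkpoint named in the maps above):
if __name__ == "__main__":
    tokenizer = BlenderbotSmallTokenizerFast.from_pretrained('facebook/blenderbot_small-90M' )
    ids = tokenizer('sample text' ).input_ids
    print(tokenizer.decode(ids ) )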
| 356 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
for attribute in key.split('''.''' ):
UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
UpperCAmelCase__ : Union[str, Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ : int = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Dict = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[str] = value
elif weight_type == "bias":
UpperCAmelCase__ : Tuple = value
else:
UpperCAmelCase__ : Tuple = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : Dict = fairseq_model.state_dict()
UpperCAmelCase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ : Any = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase__ : str = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ : List[str] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
UpperCAmelCase__ : Optional[int] = True
if "*" in mapped_key:
UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
UpperCAmelCase__ : List[str] = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase__ : Dict = '''weight_v'''
elif "bias" in name:
UpperCAmelCase__ : Optional[int] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ : Tuple = '''weight'''
else:
UpperCAmelCase__ : Optional[Any] = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
UpperCAmelCase__ : Tuple = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase__ : Optional[Any] = name.split('''.''' )
UpperCAmelCase__ : Union[str, Any] = int(items[0] )
UpperCAmelCase__ : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ : Optional[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Any:
if config_path is not None:
UpperCAmelCase__ : Any = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ )
else:
UpperCAmelCase__ : int = UniSpeechSatConfig()
UpperCAmelCase__ : Tuple = ''''''
if is_finetuned:
UpperCAmelCase__ : Optional[int] = UniSpeechSatForCTC(lowerCAmelCase__ )
else:
UpperCAmelCase__ : List[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
UpperCAmelCase__ : Union[str, Any] = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
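# Example invocation (a sketch; every path below is a placeholder):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-sat-converted
# Add --not_finetuned to convert a pretraining-only checkpoint rather than a CTC one.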
| 299 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=10 , A_=3 , A_=2 , A_=2 , A_=2 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=0.9 , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =num_channels
__UpperCamelCase =patch_size
__UpperCamelCase =tubelet_size
__UpperCamelCase =num_frames
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =mask_ratio
__UpperCamelCase =scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__UpperCamelCase =(image_size // patch_size) ** 2
__UpperCamelCase =(num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__UpperCamelCase =int(mask_ratio * self.seq_length )
def _a ( self ) -> int:
__UpperCamelCase =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Tuple:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
def _a ( self , A_ , A_ , A_ ) -> str:
__UpperCamelCase =VideoMAEModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase =model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Tuple:
__UpperCamelCase =VideoMAEForPreTraining(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__UpperCamelCase =torch.ones((self.num_masks,) )
__UpperCamelCase =torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
__UpperCamelCase =mask.expand(self.batch_size , -1 ).bool()
__UpperCamelCase =model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# model only returns predictions for masked patches
__UpperCamelCase =mask.sum().item()
__UpperCamelCase =3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def _a ( self ) -> Any:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def _a ( self ) -> Optional[int]:
__UpperCamelCase =VideoMAEModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def _a ( self , A_ , A_ , A_=False ) -> str:
__UpperCamelCase =copy.deepcopy(SCREAMING_SNAKE_CASE_ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__UpperCamelCase =torch.ones((self.model_tester.num_masks,) )
__UpperCamelCase =torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
__UpperCamelCase =mask.expand(self.model_tester.batch_size , -1 ).bool()
__UpperCamelCase =bool_masked_pos.to(SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class in [
*get_values(SCREAMING_SNAKE_CASE_ ),
]:
__UpperCamelCase =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
return inputs_dict
def _a ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def _a ( self ) -> Union[str, Any]:
pass
def _a ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def _a ( self ) -> Tuple:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )
@slow
def _a ( self ) -> Optional[Any]:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =VideoMAEModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _a ( self ) -> List[Any]:
if not self.has_attentions:
pass
else:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =True
for model_class in self.all_model_classes:
__UpperCamelCase =self.model_tester.seq_length - self.model_tester.num_masks
__UpperCamelCase =(
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =True
__UpperCamelCase =model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__UpperCamelCase =model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase =outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCamelCase =True
__UpperCamelCase =model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__UpperCamelCase =model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase =outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__UpperCamelCase =len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__UpperCamelCase =model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase =outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _a ( self ) -> List[str]:
def check_hidden_states_output(A_ , A_ , A_ ):
__UpperCamelCase =model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__UpperCamelCase =model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase =outputs.hidden_states
__UpperCamelCase =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =self.model_tester.seq_length - self.model_tester.num_masks
__UpperCamelCase =num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase =True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _a ( self ) -> List[Any]:
pass
def prepare_video():
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Optional[int]:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> int:
__UpperCamelCase =VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_video()
__UpperCamelCase =image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =torch.tensor([0.3669, -0.0688, -0.2421] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def _a ( self ) -> Dict:
__UpperCamelCase =VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_video()
__UpperCamelCase =image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
# add boolean mask, indicating which patches to mask
__UpperCamelCase =hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
__UpperCamelCase =torch.Size([1, 1408, 1536] )
__UpperCamelCase =torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=SCREAMING_SNAKE_CASE_ )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__UpperCamelCase =torch.tensor([0.5142] , device=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.loss , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__UpperCamelCase =VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=SCREAMING_SNAKE_CASE_ ).to(
SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
__UpperCamelCase =model(**SCREAMING_SNAKE_CASE_ )
        __UpperCamelCase =torch.tensor([0.6469] , device=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.loss , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
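# Worked sketch of the token bookkeeping the tests above rely on, using the
# tester defaults (image_size=10, patch_size=2, num_frames=2, tubelet_size=2,
# mask_ratio=0.9):
#   patches per frame = (10 // 2) ** 2  -> 25
#   sequence length   = (2 // 2) * 25   -> 25
#   masked tokens     = int(0.9 * 25)   -> 22
# so the boolean mask built for pre-training is 22 ones followed by 3 zeros:
#   bool_masked_pos = torch.cat([torch.ones(22), torch.zeros(3)]).bool()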
| 62 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[str] = "relu" , **SCREAMING_SNAKE_CASE_ : Any , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        lowercase_ = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        lowercase_ = tf.keras.layers.Conv2D(
            filters=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , strides=SCREAMING_SNAKE_CASE_ , padding='''VALID''' , groups=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' , )
        lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
        lowercase_ = ACT2FN[activation] if activation is not None else tf.identity
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = self.convolution(self.padding(SCREAMING_SNAKE_CASE_ ) )
lowercase_ = self.normalization(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : str ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config.num_channels
lowercase_ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
lowercase_ = shape_list(SCREAMING_SNAKE_CASE_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 2, 3, 1) )
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
        lowercase_ = tf.keras.layers.Conv2D(
            filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , strides=SCREAMING_SNAKE_CASE_ , use_bias=SCREAMING_SNAKE_CASE_ , name='''convolution''' )
lowercase_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False ) -> tf.Tensor:
return self.normalization(self.convolution(SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
class TFRegNetSELayer(tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
        lowercase_ = tf.keras.layers.GlobalAveragePooling2D(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
        lowercase_ = [
            tf.keras.layers.Conv2D(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
            tf.keras.layers.Conv2D(filters=SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Dict:
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
for layer_module in self.attention:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.2''' ),
]
        lowercase_ = ACT2FN[config.hidden_act]
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 1 , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = in_channels != out_channels or stride != 1
lowercase_ = max(1 , out_channels // config.groups_width )
lowercase_ = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
lowercase_ = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name='''layer.3''' ),
]
        lowercase_ = ACT2FN[config.hidden_act]
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
lowercase_ = hidden_state
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.shortcut(SCREAMING_SNAKE_CASE_ )
hidden_state += residual
lowercase_ = self.activation(SCREAMING_SNAKE_CASE_ )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE_ : RegNetConfig , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
lowercase_ = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name='''layers.0''' ),
*[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> int:
for layer_module in self.layers:
lowercase_ = layer_module(SCREAMING_SNAKE_CASE_ )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : RegNetConfig , **SCREAMING_SNAKE_CASE_ : Dict ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
lowercase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'''stages.{i+1}''' ) )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True ) -> TFBaseModelOutputWithNoAttention:
lowercase_ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
lowercase_ = stage_module(SCREAMING_SNAKE_CASE_ )
if output_hidden_states:
lowercase_ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_ )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer ):
    """simple docstring"""
    config_class = RegNetConfig
def __init__( self : str , SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Any:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = config
lowercase_ = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name='''embedder''' )
lowercase_ = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name='''encoder''' )
        lowercase_ = tf.keras.layers.GlobalAveragePooling2D(keepdims=SCREAMING_SNAKE_CASE_ , name='''pooler''' )
@unpack_inputs
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = encoder_outputs[0]
lowercase_ = self.pooler(SCREAMING_SNAKE_CASE_ )
# Change to NCHW output format have uniformity in the modules
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
lowercase_ = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase_ = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel ):
    """simple docstring"""
    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    @property
    def input_signature(self ):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.float32 )}
REGNET_START_DOCSTRING = r'\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConveNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , REGNET_START_DOCSTRING , )
class TFRegNetModel(TFRegNetPreTrainedModel ):
    """simple docstring"""
    def __init__( self , config: RegNetConfig , *inputs , **kwargs ):
        super().__init__(config , *inputs , **kwargs )
        self.regnet = TFRegNetMainLayer(config , name='''regnet''' )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : tf.Tensor , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification(TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
    """simple docstring"""
    def __init__( self , config: RegNetConfig , *inputs , **kwargs ):
        super().__init__(config , *inputs , **kwargs )
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config , name='''regnet''' )
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
        ]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : tf.Tensor = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
lowercase_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ = self.regnet(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = outputs.pooler_output if return_dict else outputs[1]
lowercase_ = self.classifier[0](SCREAMING_SNAKE_CASE_ )
lowercase_ = self.classifier[1](SCREAMING_SNAKE_CASE_ )
lowercase_ = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ )
if not return_dict:
lowercase_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states )
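# Hedged usage sketch for the classification head above. A minimal sketch only:
# it assumes the class maps onto the public TFRegNetForImageClassification API
# and that the "facebook/regnet-y-040" checkpoint is available — both are
# assumptions here, not guaranteed by this file.
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(image, return_tensors="tf")   # `image` is a PIL.Image
#   logits = model(**inputs).logits                  # shape (1, num_labels)
#   predicted = int(tf.math.argmax(logits, axis=-1)[0])
#   print(model.config.id2label[predicted])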
| 30 | 0 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    '''simple docstring'''
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    '''simple docstring'''
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    '''simple docstring'''
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    '''simple docstring'''
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
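# Quick hand-checkable examples of the collinearity test above: three points on
# the line x = y = z are collinear; moving one off the line breaks it.
#   are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))   # True  (AB x AC is zero)
#   are_collinear((0, 0, 0), (1, 1, 1), (1, 0, 0))   # False (cross product nonzero)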
| 350 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 300 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """simple docstring"""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    """simple docstring"""
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test, predictions)}""")
    print(f"""Mean Square Error : {mean_squared_error(y_test, predictions)}""")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
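# Hedged aside: mean_squared_error above reports MSE in squared units of the
# target; for an error in the target's own units, take the square root yourself:
#   rmse = float(np.sqrt(mean_squared_error(y_test, predictions)))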
| 90 |
from math import sqrt
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """simple docstring"""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
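# Sanity checks that are cheap to verify by hand:
#   is_prime(2) -> True, is_prime(87) -> False (87 == 3 * 29)
#   solution(6) -> 13  (the first six primes are 2, 3, 5, 7, 11, 13)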
| 90 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        '''simple docstring'''
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        # fmt: off
a = {'''input_ids''': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=a,  # the fmt-off dict literal kept above
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 358 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 330 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 236 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
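# Hedged extension sketch: if you also want to log which devices ran, torch
# exposes per-device names (only query them when CUDA is available):
#   for i in range(torch.cuda.device_count()):
#       print(i, torch.cuda.get_device_name(i))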
| 236 | 1 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True

    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )


def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    '''simple docstring'''
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)


@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features):
        '''simple docstring'''
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        attention_mask = None  # stays None when the batch carries no attention mask
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch


class WavaVecaPreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        '''simple docstring'''
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
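# Hedged invocation sketch for the pretraining script above. The script
# filename and the dataset/model identifiers are illustrative assumptions,
# not values this file pins down:
#   python run_pretrain.py \
#     --model_name_or_path="patrickvonplaten/wav2vec2-base" \
#     --dataset_name="librispeech_asr" --dataset_config_name="clean" \
#     --train_split_name="train.100" --speech_file_column="file" \
#     --output_dir="./wav2vec2-pretrained" --do_train \
#     --max_duration_in_seconds=20.0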
| 300 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_lowerCAmelCase : Optional[Any] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_lowerCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
_lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(__a) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(__a))
_lowerCAmelCase : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
_lowerCAmelCase : Optional[int] = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 300 | 1 |
'''simple docstring'''
UpperCamelCase_ = """\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"""
UpperCamelCase_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCamelCase_ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
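# Hedged illustration of how these mappings are typically consumed: the keys
# look like placeholders substituted into documentation code snippets, e.g.
#   snippet = "model = {model_class}.from_pretrained(checkpoint)"
#   for placeholder, fake_name in black_avoid_patterns.items():
#       snippet = snippet.replace(placeholder, fake_name)
# The exact consumer of `black_avoid_patterns` is an assumption here, not
# something this file shows.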
| 309 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
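# Worked example: for the infix expression "a+b*(c^d-e)",
#   infix_2_postfix("a+b*(c^d-e)") yields "abcd^e-*+"
#   infix_2_prefix("a+b*(c^d-e)")  yields "+a*b-^cde"
# (reverse the input with swapped parentheses, convert to postfix, reverse back).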
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 299 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 |
"""simple docstring"""
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
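# Worked example (hand-checkable): with cash_flows = [-1000, 500, 500, 500] and
# discount_rate = 0.1, the sum is -1000 + 500/1.1 + 500/1.21 + 500/1.331,
# so net_present_value(0.1, [-1000, 500, 500, 500]) == 243.43.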
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128 | 0 |
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
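# Quick checks: check_polygon([3, 4, 5]) -> True (5 < 3 + 4), while
# check_polygon([1, 1, 3]) -> False (3 is not shorter than 1 + 1).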
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        '''simple docstring'''
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        '''simple docstring'''
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        '''simple docstring'''
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
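# Hedged usage sketch for the extractor above. The output shape follows from
# the defaults (feature_size=80 mel bins, 16 kHz input) but the exact frame
# count is an assumption here — a sketch, not a guaranteed result:
#   fe = MCTCTFeatureExtractor()
#   batch = fe(
#       [np.random.randn(16_000), np.random.randn(8_000)],
#       sampling_rate=16_000, padding="longest", return_tensors="np",
#   )
#   batch["input_features"].shape  # -> (2, num_frames, 80)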
| 300 | 1 |
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        """simple docstring"""
        self.data = data
        self.next = None

    def __repr__(self):
        """simple docstring"""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'''{temp.data}''')
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """simple docstring"""
    if not elements_list:
        raise Exception("""The Elements List is empty""")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node) -> None:
    """simple docstring"""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    """simple docstring"""
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("""Linked List:""")
    print(linked_list)
    print("""Elements in Reverse:""")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
| 13 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 13 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'snap-research/efficientformer-l1-300': (
        'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = '''efficientformer'''

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
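# Hedged usage sketch: the config instantiates with the defaults above, and
# any field can be overridden by keyword.
#   config = EfficientFormerConfig(num_hidden_layers=6)
#   assert config.hidden_sizes == [48, 96, 224, 448]
#   assert config.num_hidden_layers == 6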
| 110 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping['''shared.weight''']
    mapping["decoder.embed_tokens.weight"] = mapping['''shared.weight''']
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith('''bias''') and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['''Adafactor''', '''global_step''']
    for name, shape in tqdm(init_vars, desc='''converting tf checkpoint to dict'''):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
    tok = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''', model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop('''model.decoder.embed_positions.weight''')
    sd.pop('''model.encoder.embed_positions.weight''')
    torch.save(sd, Path(save_dir) / '''pytorch_model.bin''')
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
a_ = parser.parse_args()
if args.save_dir is None:
a_ = Path(args.tf_ckpt_path).parent.name
a_ = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
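# Hedged invocation sketch (the script filename and checkpoint path are
# illustrative assumptions taken from the default above):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc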
| 330 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
                F'but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, '
                F'`config.num_conv_layers = {self.num_conv_layers}`.'
            )
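# Hedged usage sketch: the length check above couples conv_kernel to
# num_conv_layers, e.g.
#   config = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,))   # fine
#   MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))            # raises ValueError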
| 25 |
'''simple docstring'''
from math import isqrt
def is_prime(number: int) -> bool:
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
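# Why the candidates look like this: the gap between consecutive cubes is
# (n + 1)**3 - n**3 == 3*n*n + 3*n + 1, and stepping prime_candidate by
# 6 * cube_index walks exactly that sequence: 7, 19, 37, 61, 91, ...
# so solution() counts the primes below the limit that are differences of
# consecutive cubes.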
if __name__ == "__main__":
print(F'''{solution() = }''')
| 25 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = ['''BeitFeatureExtractor''']
_lowerCAmelCase : Union[str, Any] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 300 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 300 | 1 |
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector)
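# Note: vector * sigmoid(1.702 * vector) is the standard sigmoid approximation
# of GELU; dropping the 1.702 factor gives the SiLU/Swish activation instead.
# Quick check: gaussian_error_linear_unit(np.array([0.0])) -> array([0.])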
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/xglm-564M""": 20_48,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
snake_case : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> List[Any]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _SCREAMING_SNAKE_CASE (self : int ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case : List[Any] = self.sp_model.PieceToId(snake_case__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE (self : Optional[Any] , snake_case__ : str ) -> int:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : Tuple ) -> int:
'''simple docstring'''
snake_case : List[Any] = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def _SCREAMING_SNAKE_CASE (self : List[str] , snake_case__ : str , snake_case__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
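# Illustrative usage (a sketch, left as a comment because it downloads the real
# "facebook/xglm-564M" sentencepiece model from the Hub):
#
#   tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tok("Hello world")["input_ids"]  # </s> is prepended, per build_inputs_with_special_tokens
#   text = tok.decode(ids, skip_special_tokens=True)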
| 10 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights into our Conditional DETR structure."""
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 9 |
class SubArray:
    def __init__(self, arr):
        # split the comma-separated input string into a list of number strings
        self.array = arr.split(",")

    def solve_sub_array(self):
        # Kadane-style scan: sum_value[i] is the best sum of a subarray ending at i,
        # rear[i] is the best sum seen anywhere in self.array[: i + 1].
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
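# Worked example of the Kadane-style scan above (a sketch; `_demo_sub_array`
# is a new helper, not part of the original module):
def _demo_sub_array() -> None:
    # For "1,-2,3,4,-1" the best contiguous slice is [3, 4], so this prints 7.
    print(SubArray("1,-2,3,4,-1").solve_sub_array())


if __name__ == "__main__":
    _demo_sub_array()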
| 128 | 0 |
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND for two binary inputs: 1 only when both inputs are 1."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 15 |
from math import ceil


def solution(n: int = 1_001) -> int:
    """Sum of the numbers on both diagonals of an n-by-n number spiral (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
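# Known small case (a sketch; `_demo_solution` is a new helper): the diagonals of
# a 5 x 5 number spiral are 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.
def _demo_solution() -> None:
    assert solution(5) == 101
    print(solution(5))


if __name__ == "__main__":
    _demo_solution()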
| 15 | 1 |
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Creates a linked list from the given sequence and returns its head node."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Prints the elements of the linked list in reverse order, via recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 13 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings. Reversible BPE codes
    work on unicode strings, so this avoids mapping bytes to whitespace/control characters
    the BPE code would barf on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
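# Quick illustration of the helpers above: bytes_to_unicode() gives every byte a
# printable unicode stand-in (so BPE never has to operate on raw control bytes),
# and get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} (set order is arbitrary).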
class LongformerTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for Longformer (shares the RoBERTa vocabulary format)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
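# Illustrative usage (a sketch, left as a comment because it fetches the real
# "allenai/longformer-base-4096" vocab and merges files from the Hub):
#
#   tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tok("Hello world")["input_ids"]  # wrapped as <s> ... </s> (ids 0 and 2)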
| 13 | 1 |
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
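# Example of the even split get_device_map produces (a sketch; `_demo_device_map`
# is a new helper, not part of the original module):
def _demo_device_map() -> None:
    # 12 layers over 2 devices -> 6 consecutive blocks per device:
    # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
    device_map = get_device_map(n_layers=12, devices=[0, 1])
    assert_device_map(device_map, num_blocks=12)  # raises if the map were inconsistent
    print(device_map)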
| 227 |
import argparse
import json
import os
import re
import shutil

import torch

from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

json_indent = 2


# modeled after the fairseq Dictionary class
class Dictionary:
    """A mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")


def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2


def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 227 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25 |
"""simple docstring"""
UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCAmelCase__ : Optional[int] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 25 | 1 |
def heaps(arr: list) -> list:
    """Return all permutations of `arr`, generated with Heap's algorithm (recursive)."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
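# Quick sanity check for Heap's algorithm above (a sketch; `_demo_heaps` is a
# new helper, not part of the original module):
def _demo_heaps() -> None:
    # For three elements it emits all 3! = 6 permutations, in this order:
    # [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
    print(heaps([1, 2, 3]))


if __name__ == "__main__":
    _demo_heaps()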
| 118 |
g = 9.80_665  # standard gravity, in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force = fluid density * gravity * displaced volume (Archimedes' principle)."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
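# Worked example (an illustrative sketch; `_demo_buoyant_force` is a new helper,
# not part of the original module):
def _demo_buoyant_force() -> None:
    # A fully submerged 0.5 m^3 object in fresh water (~1000 kg/m^3) displaces
    # 1000 * 9.80665 * 0.5 ≈ 4903.3 N worth of water weight.
    print(archimedes_principle(fluid_density=1000, volume=0.5))


if __name__ == "__main__":
    _demo_buoyant_force()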
| 118 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=12_81_00,
        hidden_size=15_36,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=61_44,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ):
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
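# Minimal construction example (a sketch; the smaller sizes below are illustrative,
# not the defaults of any released DeBERTa-v2 checkpoint):
if __name__ == "__main__":
    config = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
    print(config.model_type)  # "deberta-v2"
    print(config.hidden_size, config.num_attention_heads)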
| 323 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    """simple docstring"""
    node_count, edge_count = 9, 14  # noqa: F841
    edges = [
        # (node1, node2, cost)
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost] )
        adjacency[node2].append([node1, cost] )

    result = mst(adjacency )

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
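# Hedged usage note (added): the test above has no runner of its own. Assuming
# `graphs.minimum_spanning_tree_prims` is importable, it can be run directly:
if __name__ == "__main__":
    test_prim_successful_result()
    print("Prim's MST test passed")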
| 10 | 0 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out , msg + '\n')

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out , '')

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out , msg + '\n')

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY='error')
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger('transformers.models.bart.tokenization_bart')

        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY' , None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , F'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , )

        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY='super-error')
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out , '')

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out , msg + '\n')


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
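# Hedged usage note (added): the class above is a standard `unittest.TestCase`,
# so when this file is executed as a script the usual runner applies.
if __name__ == "__main__":
    unittest.main()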
| 355 |
'''simple docstring'''
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program ,timeout ,task_id ,completion_id ):
    """Run `check_program` in a separate process and record whether it passes within `timeout` seconds."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute ,args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()

    if not result:
        result.append('timed out' )

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program ,result ,timeout ):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program ,exec_globals )
            result.append('passed' )
        except TimeoutException:
            result.append('timed out' )
        except BaseException as e:
            result.append(F'failed: {e}' )

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds ):
    def signal_handler(signum ,frame ):
        raise TimeoutException('Timed out!' )

    signal.setitimer(signal.ITIMER_REAL ,seconds )
    signal.signal(signal.SIGALRM ,signal_handler )
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL ,0 )


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname


class TimeoutException(Exception ):
    """Raised when the executed program exceeds its time limit."""
    pass


class WriteOnlyStringIO(io.StringIO ):
    """StringIO that throws an exception when it's read from."""

    def read(self , *args , **kwargs ):
        raise OSError

    def readline(self , *args , **kwargs ):
        raise OSError

    def readlines(self , *args , **kwargs ):
        raise OSError

    def readable(self , *args , **kwargs ):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream ):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root ):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard(maximum_memory_bytes=None ):
    """
    Disables various destructive functions and prevents the generated code from
    interfering with the test (e.g. fork bomb, killing other processes, removing
    filesystem files, etc.). WARNING: this is NOT a security sandbox.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS ,(maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA ,(maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK ,(maximum_memory_bytes, maximum_memory_bytes) )

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = '1'

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
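# --- Hedged usage sketch (added, not part of the original file): pushing one
# candidate program through the sandbox defined above. The sample program and
# the task/completion ids are made up purely for illustration.
if __name__ == "__main__":
    sample_program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
    print(check_correctness(sample_program, timeout=3.0, task_id="demo/0", completion_id=0))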
| 227 | 0 |
def and_gate(input_1: int , input_2: int ) -> int:
    """Logical AND: returns 1 only when both inputs are 1."""
    return int((input_1, input_2).count(0 ) == 0 )


def test_and_gate() -> None:
    """simple docstring"""
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 15 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = 'pytorch_model.bin'
WEIGHTS_INDEX_NAME = 'pytorch_model.bin.index.json'
ADAPTER_CONFIG_NAME = 'adapter_config.json'
ADAPTER_WEIGHTS_NAME = 'adapter_model.bin'
ADAPTER_SAFE_WEIGHTS_NAME = 'adapter_model.safetensors'
TF2_WEIGHTS_NAME = 'tf_model.h5'
TF2_WEIGHTS_INDEX_NAME = 'tf_model.h5.index.json'
TF_WEIGHTS_NAME = 'model.ckpt'
FLAX_WEIGHTS_NAME = 'flax_model.msgpack'
FLAX_WEIGHTS_INDEX_NAME = 'flax_model.msgpack.index.json'
SAFE_WEIGHTS_NAME = 'model.safetensors'
SAFE_WEIGHTS_INDEX_NAME = 'model.safetensors.index.json'
CONFIG_NAME = 'config.json'
FEATURE_EXTRACTOR_NAME = 'preprocessor_config.json'
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = 'generation_config.json'
MODEL_CARD_NAME = 'modelcard.json'

SENTENCEPIECE_UNDERLINE = '▁'
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]


def check_min_version(min_version ):
    """Raise an ImportError when the installed transformers version is older than `min_version`."""
    if version.parse(__version__ ) < version.parse(min_version ):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers." )
| 15 | 1 |
from manim import *
class UpperCAmelCase_ (Scene):
    '''simple docstring'''
    def construct( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase : Optional[int] = Rectangle(height=0.25 , width=0.25 )
UpperCamelCase : Any = [mem.copy() for i in range(6 )]
UpperCamelCase : List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase : List[str] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase : int = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase : List[Any] = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase : Dict = Text('''CPU''' , font_size=24 )
UpperCamelCase : str = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = [mem.copy() for i in range(4 )]
UpperCamelCase : Union[str, Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase : int = Text('''GPU''' , font_size=24 )
UpperCamelCase : str = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
gpu.move_to([-1, -1, 0] )
self.add(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = [mem.copy() for i in range(6 )]
UpperCamelCase : List[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase : Dict = Text('''Model''' , font_size=24 )
UpperCamelCase : Any = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
model.move_to([3, -1.0, 0] )
self.add(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = []
UpperCamelCase : List[str] = []
for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : str = fill.copy().set_fill(__SCREAMING_SNAKE_CASE , opacity=0.8 )
target.move_to(__SCREAMING_SNAKE_CASE )
model_arr.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__SCREAMING_SNAKE_CASE , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__SCREAMING_SNAKE_CASE )
self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = [meta_mem.copy() for i in range(6 )]
UpperCamelCase : List[str] = [meta_mem.copy() for i in range(6 )]
UpperCamelCase : Any = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase : Optional[int] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase : Dict = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
UpperCamelCase : str = Text('''Disk''' , font_size=24 )
UpperCamelCase : Dict = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
disk.move_to([-4, -1.25, 0] )
self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase : List[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__SCREAMING_SNAKE_CASE )
UpperCamelCase : str = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase : Optional[Any] = Square(0.3 )
input.set_fill(__SCREAMING_SNAKE_CASE , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __SCREAMING_SNAKE_CASE , buff=0.5 )
self.play(Write(__SCREAMING_SNAKE_CASE ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__SCREAMING_SNAKE_CASE , buff=0.02 )
self.play(MoveToTarget(__SCREAMING_SNAKE_CASE ) )
self.play(FadeOut(__SCREAMING_SNAKE_CASE ) )
UpperCamelCase : List[Any] = Arrow(start=__SCREAMING_SNAKE_CASE , end=__SCREAMING_SNAKE_CASE , color=__SCREAMING_SNAKE_CASE , buff=0.5 )
a.next_to(model_arr[0].get_left() , __SCREAMING_SNAKE_CASE , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
UpperCamelCase : int = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) )
UpperCamelCase : Union[str, Any] = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02}
self.play(
Write(__SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(model_cpu_arr[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
UpperCamelCase : Union[str, Any] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __SCREAMING_SNAKE_CASE , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
UpperCamelCase : int = AnimationGroup(
FadeOut(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , MoveToTarget(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , FadeIn(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__SCREAMING_SNAKE_CASE )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
UpperCamelCase : Tuple = 0.7
self.play(
Circumscribe(model_arr[i] , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i] , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[i + 1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(model_arr[i + 1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(cpu_left_col_base[-1] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , Circumscribe(gpu_rect[0] , color=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
UpperCamelCase : str = a_c
UpperCamelCase : Optional[int] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__SCREAMING_SNAKE_CASE ) , FadeOut(__SCREAMING_SNAKE_CASE , run_time=0.5 ) , )
UpperCamelCase : List[str] = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) , MoveToTarget(__SCREAMING_SNAKE_CASE ) )
self.wait()
| 315 |
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : Any = set()
# Replace all the whitespace in our sentence
UpperCamelCase : Union[str, Any] = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 2_6
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : str = [False] * 2_6
for char in input_str:
if char.islower():
UpperCamelCase : List[Any] = True
elif char.isupper():
UpperCamelCase : List[Any] = True
return all(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def a ( ):
"""simple docstring"""
from timeit import timeit
UpperCamelCase : int = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 315 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast ( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def save_vocabulary(self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )

        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )

        return (out_vocab_file,)
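# Hedged usage sketch (added, kept commented out so the module stays import-only):
# loading the fast tokenizer defined above from the "camembert-base" checkpoint
# referenced in the pretrained maps, then building a sequence-pair input.
#
#   tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#   pair_ids = tokenizer.build_inputs_with_special_tokens([10, 11], [12, 13])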
| 227 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Dict = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
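# Note (added): with the `_LazyModule` pattern above, the heavy submodules are
# imported only on first attribute access, so e.g.
# `from transformers.models.bridgetower import BridgeTowerConfig` stays cheap
# until the configuration module is actually touched.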
| 227 | 1 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3 ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    if isinstance(number_of_qubits , str ):
        raise TypeError("number of qubits must be a integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10)." )

    qr = QuantumRegister(number_of_qubits , "qr" )
    cr = ClassicalRegister(number_of_qubits , "cr" )

    quantum_circuit = QuantumCircuit(qr , cr )

    counter = number_of_qubits

    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )

    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )

    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator" )
    job = execute(quantum_circuit , backend , shots=10_000 )

    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
    )
| 31 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray , max_length: float , sample_rate: int = 16_000 ):
    """Randomly sample chunks of `max_length` seconds from the input audio array."""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None , metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(
        default=None , metadata={'help': 'A file containing the training audio paths and labels.'})
    eval_file: Optional[str] = field(
        default=None , metadata={'help': 'A file containing the validation audio paths and labels.'})
    train_split_name: str = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    eval_split_name: str = field(
        default='validation' , metadata={
            'help': (
                'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    audio_column_name: str = field(
        default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
    label_column_name: str = field(
        default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={'help': 'Name or path of preprocessor config.'})
    freeze_feature_encoder: bool = field(
        default=True , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
    attention_mask: bool = field(
        default=True , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )

    def __post_init__( self ):
        """simple docstring"""
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.", FutureWarning, )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`." )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification" , model_args , data_args )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
    logger.info(F'Training/evaluation parameters {training_args}' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                "Use --overwrite_output_dir to train from scratch." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            F'{", ".join(raw_datasets["train"].column_names )}.' )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
            "Make sure to set `--label_column_name` to the correct text column - one of "
            F'{", ".join(raw_datasets["train"].column_names )}.' )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )

        return output_batch

    def val_transforms(batch ):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["labels"] = list(batch[data_args.label_column_name] )

        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy" )

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )

    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
    main()
| 31 | 1 |
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size , sigma ):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x ) + square(y )) / (2 * square(sigma )) )
    return g


def gaussian_filter(image , k_size , sigma ):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size) )
    row = 0
    for i, j in product(range(dst_height ) , range(dst_width ) ):
        window = ravel(image[i : i + k_size, j : j + k_size] )
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size , sigma )
    filter_array = ravel(gaussian_kernel )

    # reshape and get the dst image
    dst = dot(image_array , filter_array ).reshape(dst_height , dst_width ).astype(uint8 )
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
| 118 |
from __future__ import annotations
class BoyerMooreSearch:
    """simple docstring"""

    def __init__( self , text: str , pattern: str ) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )

    def match_in_pattern( self , char: str ) -> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text( self , current_pos: int ) -> int:
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic( self ) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 118 | 1 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = '''\
Text data.
Second line of data.'''

FILE_PATH = '''file'''
@pytest.fixture(scope="session" )
def zstd_path(tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path


@pytest.fixture
def tmpfs_file(tmpfs ):
    """simple docstring"""
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH


@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract(compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    """simple docstring"""
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path(default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    """simple docstring"""
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected


def test_cached_path_local(text_file ):
    """simple docstring"""
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file


def test_cached_path_missing_local(tmp_path ):
    """simple docstring"""
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )


def test_get_from_cache_fsspec(tmpfs_file ):
    """simple docstring"""
    output_file = get_from_cache(F"tmp://{tmpfs_file}" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_cached_path_offline():
    """simple docstring"""
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )


@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_http_offline(tmp_path_factory ):
    """simple docstring"""
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )


@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_ftp_offline(tmp_path_factory ):
    """simple docstring"""
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )


@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_fsspec_offline(tmp_path_factory ):
    """simple docstring"""
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
| 371 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source , target ):
    """simple docstring"""
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir ):
    """simple docstring"""
    args = _TestCommandArgs(dataset=dataset_loading_script_dir , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir , "README.md" )
    assert os.path.exists(datasets_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir )
    expected_dataset_infos = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_351_563,
"num_examples": 10_000,
},
{
"name": "validation",
"num_bytes": 238_418,
"num_examples": 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"] , key ), getattr(expected_dataset_infos["default"] , key )
        if key == "num_bytes":
            assert is_1percent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 244 | 0 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit

            if sudoku(grid) is not None:
                return grid

            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 8 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
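# Usage sketch (added; not part of the original file). Loading real weights needs
# network access and the checkpoint id below is hypothetical, so the example is
# only defined here, never called.
def _example_usage():
    from diffusers import DDIMScheduler, UNet2DModel

    unet = UNet2DModel.from_pretrained("some-org/unconditional-unet")  # hypothetical id
    pipe = CustomLocalPipeline(unet=unet, scheduler=DDIMScheduler())
    images, message = pipe(batch_size=1, num_inference_steps=10, return_dict=False)
    return images, message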
| 227 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
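# Added example (sketch, not part of the original module): a quick, cheap check of
# the mapping — printable bytes map to themselves, while control bytes (0 is the
# first one) are remapped to code points above 255 so every byte gets a visible character.
_example_byte_encoder = bytes_to_unicode()
assert _example_byte_encoder[ord("a")] == "a"
assert ord(_example_byte_encoder[0]) == 256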
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
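# Added example (sketch, not part of the original module): these symbol pairs
# feed the BPE merge loop in `bpe` below.
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}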
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
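# Usage sketch (added; downloading the real checkpoint requires network access,
# so the example is only defined, never called here):
def _example_led_tokenizer():
    tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    batch = tokenizer(["Long documents need global attention."], return_tensors="pt")
    return batch["input_ids"]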
| 359 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
    def test_model_download(self):
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

            # Check metrics
            metrics = load_json(model.metrics_save_path)
            first_step_stats = metrics["val"][0]
            last_step_stats = metrics["val"][-1]
            self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
            assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
            self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
            # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
            self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

            # test learning requirements:
            # 1. BLEU improves over the course of training by more than 2 pts
            self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
            # 2. BLEU finishes above 17
            self.assertGreater(last_step_stats["val_avg_bleu"], 17)
            # 3. test BLEU and val BLEU within ~1.1 pt.
            self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

            # check lightning ckpt can be loaded and has a reasonable statedict
            contents = os.listdir(output_dir)
            ckpt_name = [x for x in contents if x.endswith(".ckpt")][0]
            ckpt_path = os.path.join(args.output_dir, ckpt_name)
            ckpt = torch.load(ckpt_path, map_location="cpu")
            expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

            # TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                contents = {os.path.basename(p) for p in contents}
                assert "test_generations.txt" in contents
                assert "test_results.txt" in contents
                # assert len(metrics["val"]) == desired_n_evals
                assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

            # Check metrics
            metrics = load_json(model.metrics_save_path)
            first_step_stats = metrics["val"][0]
            last_step_stats = metrics["val"][-1]
            assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
            assert last_step_stats["val_avg_gen_time"] >= 0.01
            assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
            assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
            assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

            # check lightning ckpt can be loaded and has a reasonable statedict
            contents = os.listdir(output_dir)
            ckpt_name = [x for x in contents if x.endswith(".ckpt")][0]
            ckpt_path = os.path.join(args.output_dir, ckpt_name)
            ckpt = torch.load(ckpt_path, map_location="cpu")
            expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

            # TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                contents = {os.path.basename(p) for p in contents}
                assert "test_generations.txt" in contents
                assert "test_results.txt" in contents
                # assert len(metrics["val"]) == desired_n_evals
                assert len(metrics["test"]) == 1
| 122 | 0 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
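# Usage sketch (added): with use_aggregator=False the metric returns a list of
# per-pair Score tuples instead of bootstrap aggregates. Loading the metric
# requires the rouge_score package, so this is only defined, never called here.
def _example_rouge_per_sample():
    import datasets

    rouge = datasets.load_metric("rouge")
    return rouge.compute(
        predictions=["hello there"], references=["hello there"], use_aggregator=False
    )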
| 315 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
@max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 315 | 1 |
'''simple docstring'''
def solution(power: int = 1000) -> int:
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
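# Example (added): 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26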
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
print('Sum of the digits is: ', result)
| 236 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : List[str] = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
lowercase__ = """gptj"""
lowercase__ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(GPTJOnnxConfig, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 236 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Any = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"
    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
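# Usage sketch (added): instantiating the config with its defaults mirrors the
# studio-ousia/luke-base architecture referenced above.
def _example_luke_config():
    config = LukeConfig()
    assert config.entity_emb_size == 256
    return config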
| 31 | '''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
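# Usage sketch (added; the checkpoint ids are hypothetical and loading them needs
# network access, so the example is only defined, never called here):
def _example_multi_controlnet():
    canny = ControlNetModel.from_pretrained("some-org/controlnet-canny")  # hypothetical id
    depth = ControlNetModel.from_pretrained("some-org/controlnet-depth")  # hypothetical id
    return MultiControlNetModel([canny, depth])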
| 31 | 1 |
def upper(word: str) -> str:
    """
    Convert the entire string to ASCII uppercase letters.

    >>> upper("wow")
    'WOW'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 247 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 247 | 1 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the tokenizers library, so skip
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the tokenizers library, so skip
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 172 |
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
lowerCamelCase_ = frozenset(['''prompt''', '''negative_prompt'''])
lowerCamelCase_ = frozenset([])
lowerCamelCase_ = frozenset(['''image'''])
lowerCamelCase_ = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
lowerCamelCase_ = frozenset(['''image'''])
lowerCamelCase_ = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
lowerCamelCase_ = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
lowerCamelCase_ = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
lowerCamelCase_ = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
lowerCamelCase_ = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
lowerCamelCase_ = frozenset(['''image''', '''mask_image'''])
lowerCamelCase_ = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
lowerCamelCase_ = frozenset(['''example_image''', '''image''', '''mask_image'''])
lowerCamelCase_ = frozenset(['''class_labels'''])
lowerCamelCase_ = frozenset(['''class_labels'''])
lowerCamelCase_ = frozenset(['''batch_size'''])
lowerCamelCase_ = frozenset([])
lowerCamelCase_ = frozenset(['''batch_size'''])
lowerCamelCase_ = frozenset([])
lowerCamelCase_ = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
lowerCamelCase_ = frozenset(['''prompt''', '''negative_prompt'''])
lowerCamelCase_ = frozenset(['''input_tokens'''])
lowerCamelCase_ = frozenset(['''input_tokens'''])
| 244 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there is an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
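# Usage sketch (added; downloading the checkpoint needs network access and the
# built-in OCR path needs pytesseract, so the example is only defined, never
# called here):
def _example_layoutxlm_processor():
    from PIL import Image

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.new("RGB", (224, 224), color="white")
    return processor(image, return_tensors="pt")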
| 354 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
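# Usage sketch (added): inside an Accelerate program, after `Accelerator()` (or
# `PartialState()`) has initialized the shared state, messages log once from the
# main process by default.
def _example_get_logger():
    logger = get_logger(__name__, log_level="INFO")
    logger.info("ready", main_process_only=True)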
| 264 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1_024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs )
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F'''<lang:{lang}>''' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self ) -> int:
        return len(self.encoder )

    @property
    def tgt_lang(self ) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self , new_tgt_lang ) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )

    def set_tgt_lang_special_tokens(self , tgt_lang: str ) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self , token ):
        return self.encoder.get(token , self.encoder[self.unk_token] )

    def _convert_id_to_token(self , index ) -> str:
        return self.decoder.get(index , self.unk_token )

    def convert_tokens_to_string(self , tokens: List[str] ) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones

    def get_vocab(self ) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self ) -> Dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__(self , d: Dict ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), F'''{save_directory} should be a directory'''
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder, vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file, spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path, '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))


def load_spm(path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm


def load_json(path: str ) -> Union[Dict, List]:
    with open(path , '''r''' ) as f:
        return json.load(f )


def save_json(data , path: str ) -> None:
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
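# --- Hedged usage sketch (added for illustration; not part of the original module).
# `save_json` and `load_json` above are simple inverses for JSON-serializable data,
# which is how the tokenizer persists its vocabulary:
#
#   example_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "hello": 42}
#   save_json(example_vocab, "vocab.json")
#   assert load_json("vocab.json") == example_vocab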
| 336 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float , y: float , max_step: int ) -> float:
    # escape-time iteration of z_{n+1} = z_n^2 + c with c = x + y*i, written in
    # real arithmetic: (a + b*i)^2 + (x + y*i) = (a^2 - b^2 + x) + (2ab + y)*i
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )


def get_image(
    image_width: int = 800 ,
    image_height: int = 600 ,
    figure_center_x: float = -0.6 ,
    figure_center_y: float = 0 ,
    figure_width: float = 3.2 ,
    max_step: int = 50 ,
    use_distance_color_coding: bool = True ,
) -> Image.Image:
    img = Image.new("""RGB""" , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
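# --- Worked example (added for illustration; not part of the original file).
# For c = 0 the orbit stays at 0 forever, so the point is inside the set and
# get_distance returns 1. For c = 1 the orbit is 0, 1, 2, 5, 26, ... and
# a*a + b*b exceeds 4 after a few steps, so get_distance returns a value < 1,
# which the coloring functions above map to a color outside the set.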
| 122 | 0 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )


def insort_right(sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )


def binary_search(sorted_collection: list[int] , item: int ) -> int | None:
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int] , item: int ) -> int | None:
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int] , item: int , left: int , right: int ) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by comma:\n").strip()
UpperCAmelCase__ = sorted(int(item) for item in user_input.split(","))
UpperCAmelCase__ = int(input("Enter a single number to be found in the list:\n"))
UpperCAmelCase__ = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 26 |
import math
def solution(n: int = 100 ) -> int:
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 3_84, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self ):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 1_92, 3_84, 7_68],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )

    def create_and_check_model(self , config , pixel_values , labels ):
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_depth_estimation(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )

    def create_and_check_for_semantic_segmentation(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            '''depth-estimation''': DPTForDepthEstimation,
            '''feature-extraction''': DPTModel,
            '''image-segmentation''': DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp( self ):
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self: Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self: str ):
pass
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase , lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :str = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase :str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
lowercase , lowercase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase :List[Any] = model_class(_lowerCAmelCase )
lowercase :str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase :Union[str, Any] = [*signature.parameters.keys()]
lowercase :Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: str ):
lowercase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: str ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase , lowercase :Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase :List[str] = True
if model_class in get_values(_lowerCAmelCase ):
continue
lowercase :str = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
lowercase :Tuple = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
lowercase :Union[str, Any] = model(**_lowerCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self: int ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase , lowercase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase :str = False
lowercase :Optional[Any] = True
if model_class in get_values(_lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
lowercase :str = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.gradient_checkpointing_enable()
model.train()
lowercase :Optional[Any] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
lowercase :Dict = model(**_lowerCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self: Any ):
lowercase , lowercase :Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase :List[Any] = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase :Optional[int] = model_class(config=_lowerCAmelCase )
# Skip the check for the backbone
lowercase :List[str] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowercase :Tuple = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE ( self: Dict ):
pass
@slow
def SCREAMING_SNAKE_CASE ( self: List[str] ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowercase :Tuple = DPTModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Any ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
lowercase , lowercase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase :Dict = "add"
with self.assertRaises(_lowerCAmelCase ):
lowercase :Optional[int] = DPTForDepthEstimation(_lowerCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase :List[str] = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
lowercase :str = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(_lowerCAmelCase )
lowercase :Tuple = prepare_img()
lowercase :Optional[Any] = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase :List[Any] = model(**_lowerCAmelCase )
lowercase :Union[str, Any] = outputs.predicted_depth
# verify the predicted depth
lowercase :List[str] = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , _lowerCAmelCase )
lowercase :int = torch.tensor(
[[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , _lowerCAmelCase , atol=1e-4 ) )
| 236 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
_UpperCAmelCase : Tuple = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class ErnieMConfig(PretrainedConfig ):
    model_type = '''ernie_m'''
    attribute_map = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 25_00_02,
        hidden_size: int = 7_68,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 30_72,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 5_14,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
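# --- Hedged usage sketch (added for illustration; not part of the original file).
# The config is a plain container; overriding a few fields is enough to describe
# a smaller hypothetical variant:
#
#   config = ErnieMConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
#   assert config.hidden_size == 384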
| 236 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
class BartTokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = BartTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ) -> None:
        """simple docstring"""
        mask_token = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = mask_token

    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
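# --- Hedged usage sketch (added for illustration; not part of the original file).
# With a pretrained checkpoint, the special-token helpers above produce the
# familiar <s> ... </s> framing (requires network access to fetch the files):
#
#   tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   ids = tok("hello world")["input_ids"]
#   # ids == [tok.bos_token_id, ...subword ids..., tok.eos_token_id]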
| 208 |
import math
def decimal_to_octal(num: int ) -> str:
    '''simple docstring'''
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"""0o{int(octal )}"""
def main():
'''simple docstring'''
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(2_16 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
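# --- Worked example (added for illustration; not part of the original file).
# decimal_to_octal(100) returns "0o144": 100 = 1*64 + 4*8 + 4*1, and the digits
# are accumulated in base 10 as 1*10^2 + 4*10^1 + 4*10^0 = 144.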
| 208 | 1 |
"""simple docstring"""
def partition(m ) -> int:
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
SCREAMING_SNAKE_CASE = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
SCREAMING_SNAKE_CASE = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 247 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__ ).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu" , "ms_deform_attn_cpu.cpp" ),
            os.path.join("cuda" , "ms_deform_attn_cuda.cu" ),
        ]
    ]

    load(
        "MultiScaleDeformableAttention" , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=["-DWITH_CUDA=1"] , extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ] , )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 247 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
__lowercase = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
__lowercase = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images ):
    '''simple docstring'''
    # map [-1, 1] tensors to [0, 1], move channels last, and hand off to numpy_to_pil
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images


def numpy_to_pil(images ):
    '''simple docstring'''
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('''uint8''' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
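# --- Hedged usage sketch (added for illustration; not part of the original file).
# `pt_to_pil` expects a (batch, channels, height, width) float tensor in [-1, 1],
# the convention used by diffusion pipelines:
#
#   import torch
#   batch = torch.rand(2, 3, 64, 64) * 2 - 1   # two random RGB "images" in [-1, 1]
#   pils = pt_to_pil(batch)
#   assert len(pils) == 2 and pils[0].size == (64, 64)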
| 105 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
__lowercase = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    '''simple docstring'''
    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ) -> None:
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''')
        self.post_init()

    def post_init( self) -> None:
        if not isinstance(self.llm_int8_threshold , float):
            raise ValueError('''llm_int8_threshold must be a float''')
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list):
            raise ValueError('''llm_int8_skip_modules must be a list of strings''')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool):
            raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''')
        if not isinstance(self.llm_int8_has_fp16_weight , bool):
            raise ValueError('''llm_int8_has_fp16_weight must be a boolean''')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype):
            raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''')
        if not isinstance(self.bnb_4bit_quant_type , str):
            raise ValueError('''bnb_4bit_quant_type must be a string''')
        if not isinstance(self.bnb_4bit_use_double_quant , bool):
            raise ValueError('''bnb_4bit_use_double_quant must be a boolean''')
        if self.load_in_4bit and not version.parse(importlib.metadata.version('''bitsandbytes''')) >= version.parse(
            '''0.39.0'''):
            raise ValueError(
                '''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''')

    def is_quantizable( self) -> bool:
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method( self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key):
                setattr(config , key , value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key , None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file( self , json_file_path) -> None:
        with open(json_file_path , '''w''' , encoding='''utf-8''') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True) + '''\n'''
            writer.write(json_string)

    def to_dict( self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['''bnb_4bit_compute_dtype'''] = str(output['''bnb_4bit_compute_dtype''']).split('''.''')[1]
        return output

    def __repr__( self) -> str:
        return f"""{self.__class__.__name__} {self.to_json_string()}"""

    def to_json_string( self , use_diff = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True) + "\n"

    def to_diff_dict( self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
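# --- Hedged usage sketch (added for illustration; not part of the original file).
# A typical 4-bit NF4 setup; only fields that differ from the defaults survive
# in the diff-dict used for serialization:
#
#   cfg = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="bfloat16")
#   assert cfg.quantization_method() == "nf4"
#   assert "load_in_8bit" not in cfg.to_diff_dict()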
| 105 | 1 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
assert isinstance(_a , _a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Any ):
"""simple docstring"""
__magic_name__ : Tuple = tmp_path / '''cache'''
__magic_name__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ : Optional[int] = JsonDatasetReader(_a , cache_dir=_a , keep_in_memory=_a ).read()
_check_json_dataset(_a , _a )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] ):
"""simple docstring"""
__magic_name__ : Optional[Any] = tmp_path / '''cache'''
__magic_name__ : Optional[int] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ : Optional[int] = features.copy() if features else default_expected_features
__magic_name__ : str = (
Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ : Optional[Any] = JsonDatasetReader(_a , features=_a , cache_dir=_a ).read()
_check_json_dataset(_a , _a )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
__magic_name__ : List[str] = tmp_path / '''cache'''
__magic_name__ : Tuple = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__magic_name__ : str = features.copy() if features else default_expected_features
__magic_name__ : Dict = (
Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ : Any = JsonDatasetReader(_a , features=_a , cache_dir=_a ).read()
assert isinstance(_a , _a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
__magic_name__ : Tuple = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__magic_name__ : List[str] = features.copy()
__magic_name__ : str = (
Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ : int = tmp_path / '''cache'''
__magic_name__ : Optional[int] = JsonDatasetReader(_a , features=_a , cache_dir=_a ).read()
assert isinstance(_a , _a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : List[str] ):
"""simple docstring"""
__magic_name__ : Optional[int] = tmp_path / '''cache'''
__magic_name__ : List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ : Optional[int] = JsonDatasetReader(_a , cache_dir=_a , split=_a ).read()
_check_json_dataset(_a , _a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : int ):
"""simple docstring"""
if issubclass(_a , _a ):
__magic_name__ : Optional[int] = jsonl_path
elif issubclass(_a , _a ):
__magic_name__ : Optional[Any] = [jsonl_path]
__magic_name__ : int = tmp_path / '''cache'''
__magic_name__ : int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ : Tuple = JsonDatasetReader(_a , cache_dir=_a ).read()
_check_json_dataset(_a , _a )
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any]=("train",) ):
"""simple docstring"""
assert isinstance(_a , _a )
for split in splits:
__magic_name__ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__magic_name__ : List[str] = tmp_path / '''cache'''
__magic_name__ : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ : Optional[Any] = JsonDatasetReader({'train': jsonl_path} , cache_dir=_a , keep_in_memory=_a ).read()
_check_json_datasetdict(_a , _a )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = tmp_path / '''cache'''
__magic_name__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ : List[Any] = features.copy() if features else default_expected_features
__magic_name__ : Dict = (
Features({feature: Value(_a ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ : str = JsonDatasetReader({'train': jsonl_path} , features=_a , cache_dir=_a ).read()
_check_json_datasetdict(_a , _a )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : List[Any] ):
"""simple docstring"""
if split:
__magic_name__ : List[str] = {split: jsonl_path}
else:
__magic_name__ : List[str] = '''train'''
__magic_name__ : Optional[int] = {'''train''': jsonl_path, '''test''': jsonl_path}
__magic_name__ : List[Any] = tmp_path / '''cache'''
__magic_name__ : Any = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__magic_name__ : int = JsonDatasetReader(_a , cache_dir=_a ).read()
_check_json_datasetdict(_a , _a , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase ( lowerCAmelCase : Any ):
"""simple docstring"""
return json.load(_a )
def lowerCamelCase ( lowerCAmelCase : Any ):
"""simple docstring"""
return [json.loads(_a ) for line in buffer]
class _lowerCamelCase :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def __lowerCAmelCase ( self : List[str] , _A : Any , _A : List[Any] , _A : Optional[int] ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write()
buffer.seek(0 )
__magic_name__ : List[str] = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def __lowerCAmelCase ( self : str , _A : Dict , _A : Union[str, Any] , _A : Union[str, Any] , _A : Optional[int] , _A : Dict ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write()
buffer.seek(0 )
__magic_name__ : str = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def __lowerCAmelCase ( self : str , _A : Tuple , _A : Optional[Any] , _A : Union[str, Any] ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
__magic_name__ : Optional[Any] = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def __lowerCAmelCase ( self : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : List[Any] , _A : List[str] , _A : str ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
__magic_name__ : str = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
def __lowerCAmelCase ( self : str , _A : Optional[Any] ) -> int:
with pytest.raises(lowercase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def __lowerCAmelCase ( self : Optional[Any] , _A : int , _A : List[str] , _A : Tuple , _A : Union[str, Any] , _A : Optional[int] ) -> int:
__magic_name__ : Any = tmp_path_factory.mktemp('data' ) / F'test.json.{extension}'
__magic_name__ : Tuple = str(shared_datadir / F'test_file.json.{extension}' )
JsonDatasetWriter(lowercase_ , lowercase_ , compression=lowercase_ ).write()
with fsspec.open(lowercase_ , 'rb' , compression='infer' ) as f:
__magic_name__ : Optional[Any] = f.read()
with fsspec.open(lowercase_ , 'rb' , compression='infer' ) as f:
__magic_name__ : Optional[Any] = f.read()
        assert exported_content == original_content
| 331 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 264 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape , scale=1.0 , rng=None , name=None ) -> list:
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , min_seq_length=4_00 , max_seq_length=20_00 , feature_size=24 , num_mel_bins=24 , padding_value=0.0 , sampling_rate=1_60_00 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp( self ):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self )

    def _check_zero_mean_unit_variance( self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def UpperCamelCase__ ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case_ = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test feature size
snake_case_ = feature_extractor(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
snake_case_ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
snake_case_ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) )
# Test batched
snake_case_ = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_features
snake_case_ = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
snake_case_ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
snake_case_ = np.asarray(_UpperCAmelCase )
snake_case_ = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_features
snake_case_ = feature_extractor(_UpperCAmelCase , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) )
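
    # The tests below exercise cepstral mean and variance normalization (CMVN): each
    # utterance is normalized only over its real frames, i.e. up to its attention-mask length,
    # while padded frames must stay (near) zero.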
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
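
    # The hard-coded array in the integration test below is a frozen reference for the first
    # frame's log-mel features (the :30 slice simply clips to the 24 available mel bins);
    # presumably it was recorded from a known-good run of the feature extractor.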
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
| 267 |
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
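
    # Note: the dummy UNet above takes in_channels=9 because the inpainting UNet is fed the
    # concatenation of the noisy latents (4), the masked-image latents (4), and the mask (1).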
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
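
# The integration tests below require a CUDA GPU (@require_torch_gpu) plus network access to
# download the checkpoint and the reference images/arrays from the Hugging Face Hub.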
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 267 | 1 |
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
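
    # All dummy sub-models above are deliberately tiny (hidden sizes of 8-32, single layers)
    # so that the fast pipeline tests run in seconds on CPU.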
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
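
# Each Shap-E call renders the predicted 3D asset as a rotation of 20 RGB frames, which is
# why the output tensors above have shape (20, frame_size, frame_size, 3).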
| 26 |
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
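
    # noise_sampler_seed pins the Brownian noise drawn during each SDE step, which is what
    # makes the exact expected sums/means in the full-loop tests below reproducible.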
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
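
# The expected sums/means are branched per device (mps/cuda/cpu) because the same seed does
# not yield bit-identical random streams or floating-point results across backends.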
| 26 | 1 |
"""Breadth-first search shortest path from a single source vertex."""
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """The graph is given as a dictionary of adjacency lists; the source
        vertex has to be defined upon initialization.
        """
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth-first search from the source vertex, filling self.parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path as a string `v1->v2->...->vn`, or raise
        ValueError if no path from the source to the target exists.
        """
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
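
# Note: each vertex is enqueued at most once, but list.pop(0) is O(n); swapping the list
# for a collections.deque would make the traversal the textbook O(V + E). shortest_path
# then rebuilds the route by following parent pointers back toward the source.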
| 0 |
"""Crawl a page and collect the e-mail addresses found on its linked pages."""
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Collect absolute URLs from anchor tags."""
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    """Return the main domain name (example.com)."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the sub-domain name (sub.example.com)."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Return all e-mail addresses for the page's domain found via the page's links."""
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
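
# Note: every link harvested from the page is fetched sequentially, and only ValueError is
# swallowed; other request failures (timeouts, connection errors) will propagate.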
| 0 | 1 |
"""BertAbs model configuration."""
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)


BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    """Class to store the configuration of the BertAbs model."""

    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
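
# Usage sketch (all arguments fall back to the defaults above):
#   config = BertAbsConfig(vocab_size=30522, max_pos=512)
#   assert config.dec_hidden_size == 768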
| 208 |
"""Fast (tokenizers-backed) tokenization class for mBART."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
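
    # mBART marks the language with a suffix code: tokenized sequences have no prefix tokens
    # and end with "</s> <lang_code>" (see set_src_lang_special_tokens /
    # set_tgt_lang_special_tokens below).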
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
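
# Usage sketch (assumes access to the Hugging Face Hub for the pretrained files):
#   tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")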
| 208 | 1 |
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
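
# The final allclose checks pass because DDPM and DDIM are configured with the same linear
# beta schedule, so their alphas_cumprod (and therefore scheduler.add_noise) coincide; with
# identical seeding the two training runs then produce identical predictions.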
| 360 |
"""Tokenization class for PEGASUS, based on SentencePiece."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
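
    # Id layout: 0 = pad, 1 = eos, 2/3 = the two mask tokens, then <unk_2>..<unk_102> up to
    # `offset`; real sentencepiece pieces are shifted up by `offset` (see _convert_token_to_id).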
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        """Get a list where entries are [1] if a token is [eos] or [pad], else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos to the end."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
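
# Usage sketch (assumes the pretrained sentencepiece model can be fetched from the Hub):
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tok("PEGASUS is an abstractive summarization model.").input_ids  # ends with eos id 1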
| 199 | 0 |