from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
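# --- A minimal usage sketch (not part of the file above) ---
# Assumption: this input stream backs the public `datasets.Dataset.from_generator` API,
# which is the usual way to exercise it:
#
# from datasets import Dataset
#
# def gen():
#     for i in range(3):
#         yield {"id": i, "text": f"example {i}"}
#
# ds = Dataset.from_generator(gen)
# print(ds[0])  # {'id': 0, 'text': 'example 0'}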
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """
    Creates a set of `DataLoader`s for the `glue` MRPC dataset.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
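# --- A hedged launch sketch (not part of the script above) ---
# The script is meant to be launched through the `accelerate` CLI, typically after selecting
# a DeepSpeed config via `accelerate config`; the filename and flag values below are
# illustrative assumptions, not prescribed by the script itself:
#
#   accelerate launch test_performance.py \
#       --model_name_or_path bert-base-cased \
#       --num_epochs 3 \
#       --performance_lower_bound 0.80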
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
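# --- A hedged invocation sketch (not part of the test file above) ---
# These tests follow the usual transformers pytest layout; the file path below is assumed:
#
#   python -m pytest tests/models/deit/test_modeling_tf_deit.py -k "test_model" -v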
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
"""Image processor class for MobileNetV2."""

from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
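# --- A minimal preprocessing sketch (not part of the file above) ---
# Assumptions: the restored class name is correct (the original was obfuscated) and
# "image.jpg" is a placeholder path:
#
# from PIL import Image
#
# processor = MobileNetV2ImageProcessor()
# inputs = processor(images=Image.open("image.jpg"), return_tensors="pt")
# print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])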
"""simple docstring"""
import numpy as np
def lowercase ( _snake_case : int , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : int , _snake_case : Union[str, Any] ) ->Dict:
"""simple docstring"""
__snake_case : Union[str, Any] = int(np.ceil((x_end - xa) / h ) )
__snake_case : Dict = np.zeros((n + 1,) )
__snake_case : List[Any] = ya
__snake_case : int = xa
for k in range(_snake_case ):
__snake_case : Any = f(_snake_case , y[k] )
__snake_case : List[Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__snake_case : int = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__snake_case : Optional[int] = f(x + h , y[k] + h * ka )
__snake_case : Optional[int] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
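# --- A short usage sketch (not part of the file above) ---
# Solve y' = y with y(0) = 1 on [0, 3]; the last entry should approximate e**3 ≈ 20.09.
#
# y = runge_kutta(lambda x, y: y, y0=1.0, x0=0.0, h=0.01, x_end=3.0)
# print(y[-1])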
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowercase ( lowerCamelCase_ ):
"""simple docstring"""
lowercase__ = """mgp-str"""
def __init__( self : Optional[int] , UpperCamelCase__ : str=[32, 128] , UpperCamelCase__ : int=4 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : str=27 , UpperCamelCase__ : Dict=38 , UpperCamelCase__ : Any=50257 , UpperCamelCase__ : str=30522 , UpperCamelCase__ : List[str]=768 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Optional[Any]=4.0 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[int]=0.02 , **UpperCamelCase__ : int , ) -> Dict:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =max_token_length
__UpperCamelCase =num_character_labels
__UpperCamelCase =num_bpe_labels
__UpperCamelCase =num_wordpiece_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =mlp_ratio
__UpperCamelCase =distilled
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =drop_rate
__UpperCamelCase =qkv_bias
__UpperCamelCase =attn_drop_rate
__UpperCamelCase =drop_path_rate
__UpperCamelCase =output_aa_attentions
__UpperCamelCase =initializer_range
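# --- A brief usage sketch (not part of the file above) ---
# Instantiating the default configuration mirrors the architecture of the
# "alibaba-damo/mgp-str-base" checkpoint referenced in the archive map:
#
# config = MgpstrConfig()
# print(config.max_token_length, config.num_character_labels)  # 27 38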
"""Convert ViT checkpoints trained with the DINO method."""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original model's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )

    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
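# --- A hedged invocation sketch (not part of the script above) ---
# The script filename and output path below are assumptions:
#
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16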
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
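# --- An example (assumed) use of the marker registered above, in a test module ---
#
# @pytest.mark.torchaudio_latest
# def test_with_recent_torchaudio():
#     ...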
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Optional[Any] = 384
_UpperCAmelCase : Any = 7
if "tiny" in model_name:
_UpperCAmelCase : List[str] = 96
_UpperCAmelCase : List[Any] = (2, 2, 6, 2)
_UpperCAmelCase : List[str] = (3, 6, 12, 24)
elif "small" in model_name:
_UpperCAmelCase : List[Any] = 96
_UpperCAmelCase : str = (2, 2, 18, 2)
_UpperCAmelCase : int = (3, 6, 12, 24)
elif "base" in model_name:
_UpperCAmelCase : List[str] = 128
_UpperCAmelCase : Any = (2, 2, 18, 2)
_UpperCAmelCase : str = (4, 8, 16, 32)
_UpperCAmelCase : Optional[int] = 12
_UpperCAmelCase : Dict = 512
elif "large" in model_name:
_UpperCAmelCase : int = 192
_UpperCAmelCase : str = (2, 2, 18, 2)
_UpperCAmelCase : Union[str, Any] = (6, 12, 24, 48)
_UpperCAmelCase : Dict = 12
_UpperCAmelCase : List[Any] = 768
# set label information
_UpperCAmelCase : Optional[Any] = 150
_UpperCAmelCase : Tuple = '''huggingface/label-files'''
_UpperCAmelCase : Dict = '''ade20k-id2label.json'''
_UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
_UpperCAmelCase : Optional[int] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
_UpperCAmelCase : Tuple = SwinConfig(
embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , num_heads=UpperCamelCase__ , window_size=UpperCamelCase__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
_UpperCAmelCase : Dict = UperNetConfig(
backbone_config=UpperCamelCase__ , auxiliary_in_channels=UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , )
return config
def lowerCamelCase_ (UpperCamelCase__ : Dict ):
_UpperCAmelCase : Optional[int] = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm1.weight', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm1.bias', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', F'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', F'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', F'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', F'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm2.weight', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.norm2.bias', F'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', F'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', F'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', F'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', F'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.stages.{i}.downsample.reduction.weight', F'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.stages.{i}.downsample.norm.weight', F'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.stages.{i}.downsample.norm.bias', F'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def lowerCamelCase_ (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
_UpperCAmelCase : List[str] = dct.pop(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = val
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ):
_UpperCAmelCase : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_UpperCAmelCase : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_UpperCAmelCase : Union[str, Any] = state_dict.pop(F'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
_UpperCAmelCase : Optional[Any] = state_dict.pop(F'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : Any = in_proj_weight[:dim, :]
_UpperCAmelCase : Optional[Any] = in_proj_bias[: dim]
_UpperCAmelCase : Optional[Any] = in_proj_weight[
dim : dim * 2, :
]
_UpperCAmelCase : str = in_proj_bias[
dim : dim * 2
]
_UpperCAmelCase : List[Any] = in_proj_weight[
-dim :, :
]
_UpperCAmelCase : Union[str, Any] = in_proj_bias[-dim :]
# fmt: on
def lowerCamelCase_ (UpperCamelCase__ : int ):
_UpperCAmelCase , _UpperCAmelCase : str = x.shape
_UpperCAmelCase : Union[str, Any] = x.reshape(UpperCamelCase__ , 4 , in_channel // 4 )
_UpperCAmelCase : List[str] = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(UpperCamelCase__ , UpperCamelCase__ )
return x
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
_UpperCAmelCase , _UpperCAmelCase : List[str] = x.shape
_UpperCAmelCase : Any = x.reshape(UpperCamelCase__ , in_channel // 4 , 4 )
_UpperCAmelCase : Tuple = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(UpperCamelCase__ , UpperCamelCase__ )
return x
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Union[str, Any] = x.shape[0]
_UpperCAmelCase : Tuple = x.reshape(4 , in_channel // 4 )
_UpperCAmelCase : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(UpperCamelCase__ )
return x
def lowerCamelCase_ (UpperCamelCase__ : List[Any] ):
_UpperCAmelCase : str = x.shape[0]
_UpperCAmelCase : List[Any] = x.reshape(in_channel // 4 , 4 )
_UpperCAmelCase : Optional[Any] = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(UpperCamelCase__ )
return x
def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Optional[int] = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
_UpperCAmelCase : int = model_name_to_url[model_name]
_UpperCAmelCase : Optional[Any] = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , file_name=UpperCamelCase__ )[
'''state_dict'''
]
for name, param in state_dict.items():
print(UpperCamelCase__ , param.shape )
_UpperCAmelCase : Optional[int] = get_upernet_config(UpperCamelCase__ )
_UpperCAmelCase : List[str] = UperNetForSemanticSegmentation(UpperCamelCase__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_UpperCAmelCase : Union[str, Any] = state_dict.pop(UpperCamelCase__ )
if "bn" in key:
_UpperCAmelCase : Optional[Any] = key.replace('''bn''' , '''batch_norm''' )
_UpperCAmelCase : List[Any] = val
# rename keys
_UpperCAmelCase : Optional[int] = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_UpperCAmelCase : Optional[Any] = reverse_correct_unfold_reduction_order(UpperCamelCase__ )
if "norm" in key:
_UpperCAmelCase : Tuple = reverse_correct_unfold_norm_order(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# verify on image
_UpperCAmelCase : int = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
_UpperCAmelCase : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
_UpperCAmelCase : Optional[int] = SegformerImageProcessor()
_UpperCAmelCase : List[Any] = processor(UpperCamelCase__ , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = outputs.logits
print(logits.shape )
print('''First values of logits:''' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_UpperCAmelCase : Optional[int] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
_UpperCAmelCase : str = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
_UpperCAmelCase : List[Any] = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase__ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
_lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"upernet-swin-{size}" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_lowerCAmelCase :int = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
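# --- Added usage note (the script file name is illustrative; the flags come from
# the parser above):
#     python convert_swin_upernet_to_pytorch.py \
#         --model_name upernet-swin-tiny \
#         --pytorch_dump_folder_path ./upernet-swin-tiny [--push_to_hub]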
| 68 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
a__ =42
a__ =None
# Automatically constructed
a__ ="dict"
a__ =None
a__ =field(default='''Translation''' ,init=a ,repr=a )
def __call__( self ) -> List[Any]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
a__ =None
a__ =None
a__ =None
# Automatically constructed
a__ ="dict"
a__ =None
a__ =field(default='''TranslationVariableLanguages''' ,init=a ,repr=a )
def __lowerCAmelCase ( self ) -> Dict:
_UpperCAmelCase : int = sorted(set(self.languages ) ) if self.languages else None
_UpperCAmelCase : List[str] = len(self.languages ) if self.languages else None
def __call__( self ) -> str:
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def __lowerCAmelCase ( self , A ) -> List[Any]:
_UpperCAmelCase : List[str] = set(self.languages )
if self.languages and set(A ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(A ) - lang_set ) )}) are not in valid set ({", ".join(A )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
_UpperCAmelCase : Dict = []
for lang, text in translation_dict.items():
if isinstance(A , A ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = zip(*sorted(A ) )
return {"language": languages, "translation": translations}
def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
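# --- Added usage sketch (assumption: the second dataclass above mirrors
# datasets.features.TranslationVariableLanguages, whose encoding method behaves
# like the one defined above). ---
if __name__ == "__main__":
    from datasets.features import TranslationVariableLanguages

    feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
    # Pairs are sorted by (language, text) and multi-valued entries are split:
    print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die Katze"}))
    # expected shape: {'language': ('de', 'en', 'fr', 'fr'), 'translation': ('die Katze', 'the cat', 'la chatte', 'le chat')}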
| 68 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
class __lowerCAmelCase ( A__ ):
"""simple docstring"""
_snake_case : Union[str, Any] = '''encoder-decoder'''
_snake_case : List[str] = True
def __init__( self : str , **lowerCAmelCase__ : Optional[int] ) -> int:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_UpperCamelCase = kwargs.pop('''encoder''' )
_UpperCamelCase = encoder_config.pop('''model_type''' )
_UpperCamelCase = kwargs.pop('''decoder''' )
_UpperCamelCase = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase = AutoConfig.for_model(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = AutoConfig.for_model(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = True
@classmethod
def snake_case__ ( cls : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : Optional[int] ) -> PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
_UpperCamelCase = True
_UpperCamelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCAmelCase__ )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__ )
_UpperCamelCase = self.encoder.to_dict()
_UpperCamelCase = self.decoder.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
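# --- Added usage sketch (assumption: the class above mirrors
# transformers.EncoderDecoderConfig; BERT configs are just an example pairing). ---
if __name__ == "__main__":
    from transformers import BertConfig, EncoderDecoderConfig

    encoder_config = BertConfig()
    decoder_config = BertConfig()
    # The classmethod above flips is_decoder / add_cross_attention on the decoder.
    config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
    assert config.decoder.is_decoder and config.decoder.add_cross_attention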
| 324 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def lowerCamelCase__ (_UpperCAmelCase=None):
if subparsers is not None:
SCREAMING_SNAKE_CASE = subparsers.add_parser('env')
else:
SCREAMING_SNAKE_CASE = argparse.ArgumentParser('Accelerate env command')
parser.add_argument(
'--config_file' , default=_UpperCAmelCase , help='The config file to use for the default values in the launching script.')
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase)
return parser
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = torch.__version__
SCREAMING_SNAKE_CASE = torch.cuda.is_available()
SCREAMING_SNAKE_CASE = is_xpu_available()
SCREAMING_SNAKE_CASE = is_npu_available()
SCREAMING_SNAKE_CASE = 'Not found'
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = load_config_from_file(args.config_file).to_dict()
SCREAMING_SNAKE_CASE = {
'`Accelerate` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Numpy version': np.__version__,
'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''',
'PyTorch XPU available': str(_UpperCAmelCase),
'PyTorch NPU available': str(_UpperCAmelCase),
'System RAM': F'''{psutil.virtual_memory().total / 1024 ** 3:.2f} GB''',
}
if pt_cuda_available:
SCREAMING_SNAKE_CASE = torch.cuda.get_device_name()
print('\nCopy-and-paste the text below in your GitHub issue\n')
print('\n'.join([F'''- {prop}: {val}''' for prop, val in info.items()]))
print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
SCREAMING_SNAKE_CASE = (
'\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
if isinstance(_UpperCAmelCase , _UpperCAmelCase)
else F'''\t{accelerate_config}'''
)
print(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = accelerate_config
return info
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = env_command_parser()
SCREAMING_SNAKE_CASE = parser.parse_args()
env_command(_UpperCAmelCase)
return 0
if __name__ == "__main__":
raise SystemExit(main())
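# --- Added usage note: this module backs the `accelerate env` subcommand, so the
# same report is available from a shell via:
#     accelerate env [--config_file path/to/default_config.yaml]
# Running the file directly calls main(), which prints the report and exits 0. ---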
| 137 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__A : Union[str, Any] = 'src/diffusers'
# Matches is_xxx_available()
__A : Dict = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
__A : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
__A : Optional[Any] = '\n{0} = None\n'
__A : Optional[Any] = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n'
__A : Tuple = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
def __UpperCamelCase ( _A : Optional[Any] ) ->Dict:
"""simple docstring"""
lowerCamelCase_ =_re_backend.findall(_A )
if len(_A ) == 0:
return None
return "_and_".join(_A )
def __UpperCamelCase ( ) ->Optional[int]:
"""simple docstring"""
with open(os.path.join(_A , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase_ =f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase_ =0
lowerCamelCase_ ={}
# Go through the end of the file
while line_index < len(_A ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase_ =find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
lowerCamelCase_ =[]
# Until we unindent, add backend objects to the list
while line_index < len(_A ) and len(lines[line_index] ) > 1:
lowerCamelCase_ =lines[line_index]
lowerCamelCase_ =_re_single_line_import.search(_A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_A ) > 0:
lowerCamelCase_ =objects
else:
line_index += 1
return backend_specific_objects
def __UpperCamelCase ( _A : Union[str, Any] , _A : int ) ->Optional[Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(_A )
elif name.islower():
return DUMMY_FUNCTION.format(_A , _A )
else:
return DUMMY_CLASS.format(_A , _A )
def __UpperCamelCase ( _A : Any=None ) ->Any:
"""simple docstring"""
if backend_specific_objects is None:
lowerCamelCase_ =read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowerCamelCase_ ={}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ ="""[""" + """, """.join(f'"{b}"' for b in backend.split("""_and_""" ) ) + """]"""
lowerCamelCase_ ="""# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_A , _A ) for o in objects] )
lowerCamelCase_ =dummy_file
return dummy_files
def __UpperCamelCase ( _A : Dict=False ) ->Tuple:
"""simple docstring"""
lowerCamelCase_ =create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowerCamelCase_ ={"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
lowerCamelCase_ =os.path.join(_A , """utils""" )
lowerCamelCase_ ={
backend: os.path.join(_A , f'dummy_{short_names.get(_A , _A )}_objects.py' )
for backend in dummy_files.keys()
}
lowerCamelCase_ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_A ):
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase_ =f.read()
else:
lowerCamelCase_ =""""""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'Updating diffusers.utils.dummy_{short_names.get(_A , _A )}_objects.py as the main '
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
f'diffusers.utils.dummy_{short_names.get(_A , _A )}_objects.py. Run `make fix-copies` '
"""to fix this.""" )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
__A : str = parser.parse_args()
check_dummies(args.fix_and_overwrite)
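# --- Added illustration of the templates above: for backend "torch" and a class
# named "UNet2DModel", create_dummy_object (via DUMMY_CLASS) renders:
#
# class UNet2DModel(metaclass=DummyObject):
#     _backends = ["torch"]
#
#     def __init__(self, *args, **kwargs):
#         requires_backends(self, ["torch"])
#
#     @classmethod
#     def from_config(cls, *args, **kwargs):
#         requires_backends(cls, ["torch"])
#
#     @classmethod
#     def from_pretrained(cls, *args, **kwargs):
#         requires_backends(cls, ["torch"])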
| 49 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__A : Optional[int] = pd.read_csv('sample_data.csv', header=None)
__A : Optional[Any] = df.shape[:1][0]
    # If you're using some other dataset, point this slice at the target column
__A : Tuple = df.iloc[:, 1:2]
__A : Tuple = actual_data.values.reshape(len_data, 1)
__A : str = MinMaxScaler().fit_transform(actual_data)
__A : List[str] = 10
__A : Any = 5
__A : Optional[Any] = 20
__A : List[str] = len_data - periods * look_back
__A : str = actual_data[:division]
__A : int = actual_data[division - look_back :]
__A, __A : List[str] = [], []
__A, __A : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__A : List[Any] = np.array(train_x)
__A : Tuple = np.array(test_x)
__A : Any = np.array([list(i.ravel()) for i in train_y])
__A : List[Any] = np.array([list(i.ravel()) for i in test_y])
__A : Union[str, Any] = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss='mean_squared_error', optimizer='adam')
__A : Tuple = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
__A : Optional[int] = model.predict(x_test)
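    # --- Added evaluation sketch (assumptions: the right-hand-side names above
    # (df, len_data, x_test, y_test) exist as written, and the scaler is refit
    # here because the fitted instance was not kept; refitting on the same data
    # reproduces the identical transform). ---
    forecast = model.predict(x_test)
    scaler = MinMaxScaler().fit(df.iloc[:, 1:2].values.reshape(len_data, 1))
    y_pred = scaler.inverse_transform(np.asarray(forecast).reshape(-1, 1))
    y_true = scaler.inverse_transform(np.asarray(y_test).reshape(-1, 1))
    print('test RMSE (original scale):', float(np.sqrt(np.mean((y_pred - y_true) ** 2))))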
| 49 | 1 |
from scipy.stats import pearsonr
import datasets
UpperCAmelCase__ : str = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
UpperCAmelCase__ : Union[str, Any] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
UpperCAmelCase__ : List[Any] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__ ( self : Any ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def __magic_name__ ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any=False ):
"""simple docstring"""
if return_pvalue:
_A: int = pearsonr(lowerCAmelCase_ , lowerCAmelCase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCAmelCase_ , lowerCAmelCase_ )[0] )}
| 121 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int] , n: int ) -> list[tuple[int, int]]:
    """Return the knight moves from `position` that stay on an n x n board."""
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test , x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position )
    return permissible_positions
def is_complete(board: list[list[int]] ) -> bool:
    """Check whether every square of the board has been visited."""
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board: list[list[int]] , pos: tuple[int, int] , curr: int ) -> bool:
    """Extend a partial tour of length `curr` by backtracking."""
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0  # undo the move on backtrack
    return False
def open_knight_tour(n: int ) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every start square."""
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F"""Open Knight Tour cannot be performed on a board of size {n}"""
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
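# --- Added demo: a 5x5 board admits an open knight's tour; board[i][j] == k
# means the knight's k-th move lands on square (i, j). ---
if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)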
| 50 | 0 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__a = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print("\n".join(upper_files) + "\n")
__a = [file for file in filepaths if " " in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print("\n".join(space_files) + "\n")
__a = [file for file in filepaths if "-" in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print("\n".join(hyphen_files) + "\n")
__a = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print("\n".join(nodir_files) + "\n")
__a = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 43 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder( input_a = 1 , input_b = 1 , carry_in = 1 ) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError("""inputs must be integers.""" )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("""inputs must be positive.""" )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError("""inputs must be exact integers.""" )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("""inputs must be less or equal to 2.""" )
    # build registers
    qr = qiskit.QuantumRegister(4 , """qr""" )
    cr = qiskit.ClassicalRegister(2 , """cr""" )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qubits
    backend = qiskit.Aer.get_backend("""aer_simulator""" )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(F"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 43 | 1 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
@staticmethod
def _SCREAMING_SNAKE_CASE (*snake_case__ : Union[str, Any] , **snake_case__ : Any ) -> List[str]:
'''simple docstring'''
pass
def hashimage(image: Image ) -> str:
    m = hashlib.md5(image.tobytes() )  # fingerprint the raw pixel bytes
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
A__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Optional[Any] = DepthEstimationPipeline(model=snake_case__ , image_processor=snake_case__ )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : Union[str, Any] , snake_case__ : Tuple ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , snake_case__ )
import datasets
snake_case : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
snake_case : Dict = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
] , snake_case__ , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
def _SCREAMING_SNAKE_CASE (self : str ) -> List[str]:
'''simple docstring'''
pass
@slow
@require_torch
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : List[Any] = "Intel/dpt-large"
snake_case : Tuple = pipeline("depth-estimation" , model=snake_case__ )
snake_case : List[str] = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
snake_case : Union[str, Any] = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
| 59 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _snake_case ( unittest.TestCase ):
lowerCAmelCase_ : Optional[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase_ : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
# Using `do_sample=False` to force deterministic output
snake_case_ = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
] , )
snake_case_ = text_generator(["This is a test", "This is a second test"] )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
] , )
snake_case_ = text_generator("This is a test" , do_sample=a__ , num_return_sequences=2 , return_tensors=a__ )
self.assertEqual(
a__ , [
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
] , )
snake_case_ = text_generator.model.config.eos_token_id
snake_case_ = "<pad>"
snake_case_ = text_generator(
["This is a test", "This is a second test"] , do_sample=a__ , num_return_sequences=2 , batch_size=2 , return_tensors=a__ , )
self.assertEqual(
a__ , [
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
[
{"generated_token_ids": ANY(a__ )},
{"generated_token_ids": ANY(a__ )},
],
] , )
@require_tf
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )
# Using `do_sample=False` to force deterministic output
snake_case_ = text_generator("This is a test" , do_sample=a__ )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
] , )
snake_case_ = text_generator(["This is a test", "This is a second test"] , do_sample=a__ )
self.assertEqual(
a__ , [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
] , )
def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> str:
'''simple docstring'''
snake_case_ = TextGenerationPipeline(model=a__ , tokenizer=a__ )
return text_generator, ["This is a test", "Another test"]
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = "Hello I believe in"
snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
snake_case_ = text_generator(a__ )
self.assertEqual(
a__ , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )
snake_case_ = text_generator(a__ , stop_sequence=" fe" )
self.assertEqual(a__ , [{"generated_text": "Hello I believe in fe"}] )
def lowerCAmelCase__ ( self , a__ , a__ ) -> Tuple:
'''simple docstring'''
snake_case_ = text_generator.model
snake_case_ = text_generator.tokenizer
snake_case_ = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
snake_case_ = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
snake_case_ = pipeline(task="text-generation" , model=a__ , tokenizer=a__ , return_full_text=a__ )
snake_case_ = text_generator("This is a test" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertNotIn("This is a test" , outputs[0]["generated_text"] )
snake_case_ = text_generator("This is a test" , return_full_text=a__ )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )
snake_case_ = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
snake_case_ = text_generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=a__ )
self.assertEqual(
a__ , [
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
[{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
] , )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_full_text=a__ , return_text=a__ )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_full_text=a__ , return_tensors=a__ )
with self.assertRaises(a__ ):
snake_case_ = text_generator("test" , return_text=a__ , return_tensors=a__ )
        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus, which always appends EOS and so
        # works even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
snake_case_ = text_generator("" )
self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
snake_case_ = text_generator("" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
snake_case_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
if (
tokenizer.model_max_length < 10_000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("This is a test" * 500 , max_new_tokens=20 )
snake_case_ = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(a__ ):
text_generator(
"This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
import torch
# Classic `model_kwargs`
snake_case_ = pipeline(
model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else)
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
snake_case_ = pipe("This is a test" )
self.assertEqual(
a__ , [
{
"generated_text": (
"This is a test test test test test test test test test test test test test test test test"
" test"
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
import torch
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
pipe("This is a test" )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
import torch
snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
pipe("This is a test" , do_sample=a__ , top_p=0.5 )
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ = "Hello world"
snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
if text_generator.model.framework == "tf":
snake_case_ = logging.get_logger("transformers.generation.tf_utils" )
else:
snake_case_ = logging.get_logger("transformers.generation.utils" )
snake_case_ = "Both `max_new_tokens`" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_length=10 , max_new_tokens=1 )
self.assertIn(a__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_new_tokens=1 )
self.assertNotIn(a__ , cl.out )
with CaptureLogger(a__ ) as cl:
snake_case_ = text_generator(a__ , max_length=10 )
self.assertNotIn(a__ , cl.out )
| 85 | 0 |
from collections.abc import Callable
import numpy as np
def _A ( ode_func: Callable , ya: float , xa: float , step_size: float , x_end: float ) -> np.ndarray:
    """Forward (explicit) Euler method: y[k + 1] = y[k] + h * f(x[k], y[k])."""
    num_steps = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((num_steps + 1,) )
    y[0] = ya
    x = xa
    for k in range(num_steps ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
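# --- Added usage sketch: dy/dx = y with y(0) = 1 integrated to x = 1; the last
# entry should approach e ~ 2.71828 as step_size shrinks (forward Euler is
# first-order accurate). ---
if __name__ == "__main__":
    print(_A(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)[-1])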
| 201 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _A ( __magic_name__=32 , __magic_name__=10 , __magic_name__=100 , __magic_name__=1026 , __magic_name__=True , __magic_name__="data/tokenized_stories_train_wikitext103.jbl" , __magic_name__="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
lowercase__ , lowercase__ = generate_datasets(
__magic_name__ , __magic_name__ , number=__magic_name__ , min_len=1026 , trim=__magic_name__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowercase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
lowercase__ = load_gpta("gpt2" ).to(__magic_name__ )
print("computing perplexity on objective set" )
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ ).item()
print("perplexity on objective set:" , __magic_name__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _A ( __magic_name__ , __magic_name__=15 , __magic_name__=128 , __magic_name__=100 , __magic_name__="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
lowercase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
lowercase__ = SecondaryLearner(__magic_name__ )
# Train secondary learner
lowercase__ = train_secondary_learner(
__magic_name__ , __magic_name__ , max_epochs=__magic_name__ , batch_size=__magic_name__ , eval_freq=100 , igf_model_path=__magic_name__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _A ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=32 , __magic_name__=1000 , __magic_name__=16 , __magic_name__=1.0 , __magic_name__=recopy_gpta , __magic_name__=None , __magic_name__=10 , __magic_name__="gpt2_finetuned.pt" , ):
lowercase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
lowercase__ = RandomSampler(__magic_name__ )
lowercase__ = DataLoader(__magic_name__ , sampler=__magic_name__ )
lowercase__ = max_steps // (len(__magic_name__ )) + 1
lowercase__ = 0
lowercase__ = torch.zeros((1, context_len) , dtype=torch.long , device=__magic_name__ )
lowercase__ , lowercase__ , lowercase__ = recopy_model(__magic_name__ , __magic_name__ , __magic_name__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(__magic_name__ )
secondary_learner.eval()
lowercase__ = []
lowercase__ = 0
lowercase__ = []
lowercase__ = []
# Compute the performance of the transformer model at the beginning
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print("Test perplexity, step" , __magic_name__ , ":" , __magic_name__ )
for epoch in range(int(__magic_name__ ) ):
for step, example in enumerate(__magic_name__ ):
torch.cuda.empty_cache()
lowercase__ = random.randint(0 , example.size(2 ) - context_len - 1 )
lowercase__ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowercase__ = model(__magic_name__ , labels=__magic_name__ )
lowercase__ = True
if secondary_learner is not None:
lowercase__ = secondary_learner.forward(
torch.tensor(__magic_name__ , dtype=torch.long , device=__magic_name__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__magic_name__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowercase__ = -1
if predicted_q < threshold:
lowercase__ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowercase__ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowercase__ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowercase__ = compute_perplexity(__magic_name__ , __magic_name__ , __magic_name__ )
test_perps.append(__magic_name__ )
print("Test perplexity, step" , __magic_name__ , ":" , __magic_name__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __magic_name__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _A ( ):
lowercase__ = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=__magic_name__ , default=__magic_name__ , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=__magic_name__ , default=__magic_name__ , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=__magic_name__ , type=__magic_name__ , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=__magic_name__ , default=__magic_name__ , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=__magic_name__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=__magic_name__ , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=__magic_name__ , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1000 , type=__magic_name__ , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=__magic_name__ , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=__magic_name__ , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=__magic_name__ , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=__magic_name__ , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1026 , type=__magic_name__ , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=__magic_name__ , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=__magic_name__ , type=__magic_name__ , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=__magic_name__ , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=__magic_name__ , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=__magic_name__ , type=__magic_name__ , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__magic_name__ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
lowercase__ = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
lowercase__ = training_secondary_learner(
__magic_name__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
lowercase__ = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowercase__ , lowercase__ = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=__magic_name__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__magic_name__ , __magic_name__ , __magic_name__ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__magic_name__ , secondary_learner=__magic_name__ , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
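# --- Added summary of the pipeline wired up in main() above ---
# 1) generate_n_pairs: sample contexts from WikiText and record their
#    information gain (change in objective-set perplexity) as training pairs.
# 2) training_secondary_learner: fit the SecondaryLearner filter on those pairs.
# 3) finetune: fine-tune GPT-2, backpropagating only on contexts whose predicted
#    information gain clears the (decaying) threshold.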
| 201 | 1 |
demo_graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def bfs_shortest_path(graph: dict , start , goal ) -> list[str]:
    """Return one shortest path (fewest edges) from start to goal, or [] if none."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict , start: str , target: str ) -> int:
    """Return the edge count of a shortest start-to-target path, or -1 if unreachable."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}  # one-element set; set(start) would split multi-character node names
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
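# --- Added note: because BFS expands nodes in increasing distance from `start`,
# the first path that reaches the goal (and the distance recorded on first visit)
# is guaranteed minimal in edge count for this unweighted graph. ---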
| 68 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'gpt_neox_japanese'
def __init__( self , lowercase=32000 , lowercase=2560 , lowercase=32 , lowercase=32 , lowercase=4 , lowercase="gelu" , lowercase=1.00 , lowercase=10000 , lowercase=2048 , lowercase=0.02 , lowercase=1e-5 , lowercase=True , lowercase=31996 , lowercase=31999 , lowercase=0.1 , lowercase=0.0 , **lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A__ = vocab_size
A__ = max_position_embeddings
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_multiple_size
A__ = hidden_act
A__ = rotary_pct
A__ = rotary_emb_base
A__ = initializer_range
A__ = layer_norm_eps
A__ = use_cache
A__ = attention_dropout
A__ = hidden_dropout
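# --- Added usage sketch (assumption: the class above mirrors
# transformers.GPTNeoXJapaneseConfig; tiny dimensions keep the demo cheap) ---
if __name__ == "__main__":
    from transformers import GPTNeoXJapaneseConfig, GPTNeoXJapaneseModel

    config = GPTNeoXJapaneseConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
    model = GPTNeoXJapaneseModel(config)  # randomly initialised weights
    print(sum(p.numel() for p in model.parameters()))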
| 68 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : str = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __SCREAMING_SNAKE_CASE ( A__ ):
'''simple docstring'''
lowerCamelCase_ :Optional[Any] = '''convbert'''
def __init__( self , snake_case_=3_0_5_2_2 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_=7_6_8 , snake_case_=2 , snake_case_=9 , snake_case_=1 , snake_case_=None , **snake_case_ , ):
'''simple docstring'''
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case , )
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : List[Any] = type_vocab_size
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Any = embedding_size
UpperCAmelCase_ : Tuple = head_ratio
UpperCAmelCase_ : int = conv_kernel_size
UpperCAmelCase_ : List[str] = num_groups
UpperCAmelCase_ : List[str] = classifier_dropout
class __SCREAMING_SNAKE_CASE ( A__ ):
'''simple docstring'''
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase_ : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase_ : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
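# --- Added note: the ONNX config above marks batch and sequence (plus choice,
# for the multiple-choice task) as dynamic axes, so an exported graph accepts
# variable batch sizes and sequence lengths at inference time. ---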
| 353 |
'''simple docstring'''
def _lowerCamelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ):
"""simple docstring"""
return int(input_a == input_a == 0 )
def _lowerCamelCase ( ):
"""simple docstring"""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
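# --- Added note: NOR is functionally complete -- NOT a == nor_gate(a, a), and
# AND/OR can each be composed from NOR gates alone, e.g.
# OR(a, b) == nor_gate(nor_gate(a, b), nor_gate(a, b)). ---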
| 274 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case :List[Any] = {'''configuration_wavlm''': ['''WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WavLMConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Optional[int] = [
'''WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WavLMForAudioFrameClassification''',
'''WavLMForCTC''',
'''WavLMForSequenceClassification''',
'''WavLMForXVector''',
'''WavLMModel''',
'''WavLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__snake_case :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
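# --- Added note: the _LazyModule indirection above defers importing the heavy
# modeling code (and torch) until an attribute such as WavLMModel is first
# accessed, so `import transformers` stays cheap when torch is absent. ---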
| 49 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__snake_case :str = logging.get_logger(__name__)
__snake_case :int = {'''vocab_file''': '''vocab.txt'''}
__snake_case :List[Any] = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
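# Usage sketch (checkpoint name taken from the pretrained maps above):
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   ids = tokenizer.build_inputs_with_special_tokens([5, 6])  # [CLS] 5 6 [SEP]
#   # create_token_type_ids_from_sequences marks sentence A tokens as 0 and sentence B tokens as 1.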
| 49 | 1 |
"""simple docstring"""
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    '''simple docstring'''
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )
    def _compute(self, predictions, references):
        """simple docstring"""
        return {"accuracy": simple_accuracy(predictions, references)}
| 354 |
"""simple docstring"""
MOD_ADLER = 65_521
def adler32(plain_text: str) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 58 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 43 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*", layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ):
    '''simple docstring'''
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True )
            with open(vocab_path, "w", encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices, vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wav2vec = HubertForCTC(config )
    else:
        hf_wav2vec = HubertModel(config )
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
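# Example invocation (a sketch; the checkpoint path and output folder are
# placeholders, not part of the original script):
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned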
| 43 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector))
def gaussian_error_linear_unit(vector: np.array) -> np.array:
    '''simple docstring'''
    return vector * sigmoid(1.702 * vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
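# Numeric sanity check (sketch): the 1.702-sigmoid form approximates the exact
# GELU x * Phi(x); at x = 1.0 it gives ~0.846 versus the exact ~0.841.
#   print(gaussian_error_linear_unit(np.array([1.0])))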
| 320 |
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    """simple docstring"""

    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
class SchedulerMixin:
    """simple docstring"""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Union[str, os.PathLike] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        '''simple docstring'''
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)
    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        '''simple docstring'''
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
    @property
    def compatibles(self):
        '''simple docstring'''
        return self._get_compatibles()
    @classmethod
    def _get_compatibles(cls):
        '''simple docstring'''
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
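# Usage sketch (hypothetical checkpoint id): concrete schedulers inherit this
# mixin, so they gain config-based (de)serialization and compatibility lookup.
#   from diffusers import DDPMScheduler
#   scheduler = DDPMScheduler.from_pretrained("some/checkpoint", subfolder="scheduler")
#   print(scheduler.compatibles)  # schedulers that can be swapped in with the same config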
| 320 | 1 |
def jaro_winkler(str_1: str, str_2: str) -> float:
    def get_matched_characters(_str_1: str, _str_2: str) -> str:
        matched = []
        limit = min(len(_str_1), len(_str_2)) // 2
        for i, l in enumerate(_str_1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str_2)))
            if l in _str_2[left:right]:
                matched.append(l)
                _str_2 = f"{_str_2[0:_str_2.index(l)]} {_str_2[_str_2.index(l) + 1:]}"
        return "".join(matched)
    # matching characters
    matching_1 = get_matched_characters(str_1, str_2)
    matching_2 = get_matched_characters(str_2, str_1)
    match_count = len(matching_1)
    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str_1)
                + match_count / len(str_2)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str_1[:4], str_2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
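# A few reference values (sketch, computed with the implementation above):
#   jaro_winkler("martha", "marhta")  # ~0.9611
#   jaro_winkler("hello", "hello")    # 1.0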
| 201 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1, ) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError('''Could not find root''') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}''')
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
F'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
F'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
| 201 | 1 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, )
@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."} )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}, )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no", metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        }, )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        }, )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}, )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    '''simple docstring'''
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort('''probability''', reverse=True)
        dataset = dataset.select(range(num_selected_rows))
    dataset = dataset.remove_columns(['''label''', '''probability'''])
    dataset = dataset.rename_column('''prediction''', '''label''')
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)
    pseudo_labeled_data_file = os.path.join(next_data_dir, F'''train_pseudo.{args.data_file_extension}''')
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    '''simple docstring'''
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None
    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file
    for key in data_files:
        extension = data_files[key].split('''.''')[-1]
        assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
    assert (
        args.eval_metric in datasets.list_metrics()
    ), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)
    logger.info('''Creating the initial data directory for self-training...''')
    data_dir_format = F'''{args.output_dir}/self-train_iter-{{}}'''.format
    initial_data_dir = data_dir_format(0)
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, '''stage-1''')
        arguments_dict = {
            '''accelerator''': accelerator,
            '''model_name_or_path''': args.model_name_or_path,
            '''cache_dir''': args.cache_dir,
            '''do_train''': True,
            '''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
            '''do_eval''': True if args.eval_file is not None else False,
            '''eval_file''': data_files['''eval'''],
            '''do_predict''': True,
            '''infer_file''': data_files['''infer'''],
            '''task_name''': args.task_name,
            '''label_list''': args.label_list,
            '''output_dir''': current_output_dir,
            '''eval_metric''': args.eval_metric,
            '''evaluation_strategy''': args.evaluation_strategy,
            '''early_stopping_patience''': args.early_stopping_patience,
            '''early_stopping_threshold''': args.early_stopping_threshold,
            '''seed''': args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})
        model_bin_file_path = os.path.join(current_output_dir, '''best-checkpoint''', MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''', model_bin_file_path, iteration, )
        else:
            logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''', iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info('''Self-training job completed: iteration: %d, stage: 1.''', iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, '''best-checkpoint''')
            current_output_dir = os.path.join(current_data_dir, '''stage-2''')
            # Update arguments_dict
            arguments_dict['''model_name_or_path'''] = model_path
            arguments_dict['''train_file'''] = data_files['''train''']
            arguments_dict['''output_dir'''] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir, '''best-checkpoint''', MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''', model_bin_file_path, iteration, )
            else:
                logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''', iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info('''Self-training job completed: iteration: %d, stage: 2.''', iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)
        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, '''best-checkpoint'''))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, '''eval_results_best-checkpoint.json''')
        test_results_file = os.path.join(current_output_dir, '''test_results_best-checkpoint.json''')
        assert os.path.exists(eval_results_file)
        with open(eval_results_file, '''r''') as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, '''infer_output_best-checkpoint.csv''')
        assert os.path.exists(infer_output_file)
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={'''data''': data_files['''infer''']})['''data''']
        infer_output = load_dataset('''csv''', data_files={'''data''': infer_output_file})['''data''']
        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, F'''eval_results_iter-{iteration}.json'''))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, F'''test_results_iter-{iteration}.json'''))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()
        data_files['''train_pseudo'''] = os.path.join(next_data_dir, F'''train_pseudo.{args.data_file_extension}''')
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
        progress_bar.update(1)
        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info('''Best iteration: %d''', best_iteration)
        logger.info('''Best evaluation result: %s = %f''', args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, F'''eval_results_iter-{best_iteration}.json'''), os.path.join(output_dir, '''eval_results_best-iteration.json'''), )
    else:
        # Assume that the last iteration is the best
        logger.info('''Best iteration: %d''', args.max_selftrain_iterations - 1)
        logger.info('''Best evaluation result: %s = %f''', args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json'''), os.path.join(output_dir, '''eval_results_best-iteration.json'''), )
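# Usage sketch (hypothetical paths and model id; `finetune` comes from the local
# `finetuning` module imported at the top of this script):
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="data/train.csv",
#       infer_file="data/infer.csv",
#       output_dir="output/self-train",
#       evaluation_strategy="epoch",
#       eval_file="data/eval.csv",
#   )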
| 98 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    '''simple docstring'''
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]
    def is_too_big(strang):
        return tok(strang, return_tensors='''pt''').input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ''' ''' + src
        cand_tgt = new_tgt + ''' ''' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir, max_tokens, save_path):
    '''simple docstring'''
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(F'''packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.''')
        Path(save_path / F'''{split}.source''').open('''w''').write('''\n'''.join(packed_src))
        Path(save_path / F'''{split}.target''').open('''w''').write('''\n'''.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
        shutil.copyfile(src_path, save_path / F'''{split}.source''')
        shutil.copyfile(tgt_path, save_path / F'''{split}.target''')
def packer_cli():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''', type=str, help='''like facebook/bart-large-cnn,t5-base, etc.''')
    parser.add_argument('''--max_seq_len''', type=int, default=128)
    parser.add_argument('''--data_dir''', type=str)
    parser.add_argument('''--save_path''', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
    packer_cli()
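# Example invocation (sketch; directory paths are placeholders):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 256 --data_dir ./cnn_dm --save_path ./cnn_dm_packed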
| 98 | 1 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    '''simple docstring'''
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    """input_texts""": datasets.Value("""string"""),
                }), reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""], )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """simple docstring"""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = """cuda"""
        else:
            device = """cuda""" if torch.cuda.is_available() else """cpu"""
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="""pt""", return_attention_mask=True, ).to(device)
        encoded_texts = encodings["""input_ids"""]
        attn_masks = encodings["""attention_mask"""]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="""none""")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 274 |
from math import ceil
def solution(n: int = 1_0_0_1) -> int:
    """simple docstring"""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 274 | 1 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # CONSTANTS per WGS84: https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
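# Example (sketch): San Francisco (37.77, -122.42) to New York (40.71, -74.01)
# comes out at roughly 4.1e6 metres, matching the known great-circle distance.
#   print(haversine_distance(37.77, -122.42, 40.71, -74.01))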
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 356 |
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 285 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"
    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
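# Usage sketch: instantiating the config directly mirrors the defaults above.
#   config = RoFormerConfig(vocab_size=50_000, rotary_value=True)
#   assert config.embedding_size == config.hidden_size  # embedding_size falls back to hidden_size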
| 46 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        message = F'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(message)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 58 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    env_level_str = os.getenv('''TRANSFORMERS_VERBOSITY''', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
                f'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
    return _default_log_level
def _get_library_name() -> str:
    return __name__.split('''.''')[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    return set_verbosity(INFO)
def set_verbosity_warning():
    return set_verbosity(WARNING)
def set_verbosity_debug():
    return set_verbosity(DEBUG)
def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''')
        handler.setFormatter(formatter)
def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''', False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__(self):
        return iter(self._iterator)
    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)
    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)
    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
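# Usage sketch (mirrors the public helpers defined above):
#   logger = get_logger("transformers")
#   set_verbosity(INFO)
#   logger.info("visible at INFO level")
#   logger.warning_advice("suppressed when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")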
| 352 |
from ..utils import DummyObject, requires_backends
class OnnxConfig(metaclass=DummyObject):
    _backends = ["onnx"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''onnx'''])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''onnx'''])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''onnx'''])
| 99 | 0 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=sys.maxsize ) -> Union[str, Any]:
_a = '''bilinear'''
_a = max_size
_a = short_edge_length
def __call__( self , __UpperCAmelCase ) -> Tuple:
_a = []
for img in imgs:
_a , _a = img.shape[:2]
# later: provide list and randomly choose index for resize
_a = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
            _a = size * 1.0 / min(h , w )
            if h < w:
                _a , _a = size, scale * w
            else:
                _a , _a = scale * h, size
            if max(newh , neww ) > self.max_size:
                _a = self.max_size * 1.0 / max(newh , neww )
                _a = newh * scale
                _a = neww * scale
            _a = int(neww + 0.5 )
            _a = int(newh + 0.5 )
            if img.dtype == np.uint8:
                _a = Image.fromarray(img )
                _a = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                _a = np.asarray(pil_image )
            else:
                _a = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hw(c) -> nchw
                _a = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False ).squeeze(0 )
            img_augs.append(img )
return img_augs
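# A short usage sketch, assuming the class above keeps its original frcnn-utils
# name ResizeShortestEdge (the identifiers in this snippet are mangled):
#
#     import numpy as np
#     aug = ResizeShortestEdge([800, 800], max_size=1333)
#     img = np.zeros((480, 640, 3), dtype=np.uint8)    # H x W x C
#     resized = aug([img])[0]                          # shortest edge scaled to 800, capped at 1333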
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase ) -> List[Any]:
_a = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_a = cfg.INPUT.FORMAT
_a = cfg.SIZE_DIVISIBILITY
_a = cfg.PAD_VALUE
_a = cfg.INPUT.MAX_SIZE_TEST
_a = cfg.MODEL.DEVICE
_a = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_a = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_a = lambda __UpperCAmelCase : (x - self.pixel_mean) / self.pixel_std
    def _UpperCAmelCase ( self , images ) -> List[str]:
        _a = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        _a = [im.shape[-2:] for im in images]
        _a = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )
    def __call__( self , images , single_image=False ) -> Dict:
        with torch.no_grad():
            if not isinstance(images , list ):
                _a = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(i , images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            _a = torch.tensor([im.shape[:2] for im in images] )
            _a = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            _a = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            _a , _a = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            _a = torch.true_divide(raw_sizes , sizes )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def A_ ( boxes : str, scale_yx : List[Any] ):
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def A_ ( tensor : Tuple, box_size : Tuple[int, int] ):
    """simple docstring"""
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    _a , _a = box_size
    tensor[:, 0].clamp_(min=0, max=w )
    tensor[:, 1].clamp_(min=0, max=h )
    tensor[:, 2].clamp_(min=0, max=w )
    tensor[:, 3].clamp_(min=0, max=h )
| 320 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class __lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> Tuple:
_a = {}
    def _UpperCAmelCase ( self , u , v , w=1 ) -> int:
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            _a = [[w, v]]
        if not self.graph.get(v ):
            _a = []
def _UpperCAmelCase ( self ) -> int:
return list(self.graph )
    def _UpperCAmelCase ( self , u , v ) -> List[str]:
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
    def _UpperCAmelCase ( self , s=-2 , d=-1 ) -> Optional[int]:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
_a = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
_a = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return sorted_nodes
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Any:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
    def _UpperCAmelCase ( self , s=-2 , e=-1 ) -> Optional[int]:
        _a = time()
        self.dfs(s , e )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
return end - begin
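# A short usage sketch, assuming the directed graph class above is exposed as
# Graph with its original method names (add_pair, dfs, bfs, topological_sort):
#
#     g = Graph()
#     g.add_pair(0, 1)
#     g.add_pair(0, 2)
#     g.add_pair(1, 3)
#     print(g.dfs(0, 3))            # vertices visited on the way from 0 to 3
#     print(g.bfs(0))               # breadth-first order from 0
#     print(g.topological_sort())   # meaningful only while the graph is acyclic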
class __lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> Optional[int]:
_a = {}
    def _UpperCAmelCase ( self , u , v , w=1 ) -> Dict:
        # check if the u exists
        if self.graph.get(u ):
            # if there already is a edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            _a = [[w, v]]
        # add the other way
        if self.graph.get(v ):
            # if there already is a edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if v does not exist
            _a = [[w, u]]
    def _UpperCAmelCase ( self , u , v ) -> Tuple:
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
        # the other way round
        if self.graph.get(v ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_ )
    def _UpperCAmelCase ( self , s=-2 , d=-1 ) -> Dict:
if s == d:
return []
_a = []
_a = []
if s == -2:
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(__UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple:
if c == -1:
_a = floor(random() * 10000 ) + 10
for i in range(__UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
_a = floor(random() * c ) + 1
if n != i:
self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 )
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]:
_a = deque()
_a = []
if s == -2:
_a = list(self.graph )[0]
d.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
while d:
_a = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict:
return len(self.graph[u] )
def _UpperCAmelCase ( self ) -> int:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return list(__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = []
_a = []
_a = list(self.graph )[0]
stack.append(__UpperCAmelCase )
visited.append(__UpperCAmelCase )
_a = -2
_a = []
_a = s
_a = False
_a = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a = len(__UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a = True
if len(__UpperCAmelCase ) != 0:
_a = stack[len(__UpperCAmelCase ) - 1]
else:
_a = False
indirect_parents.append(__UpperCAmelCase )
_a = s
_a = ss
# check if se have reached the starting point
if len(__UpperCAmelCase ) == 0:
return False
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return list(self.graph )
    def _UpperCAmelCase ( self , s=-2 , e=-1 ) -> Tuple:
        _a = time()
        self.dfs(s , e )
_a = time()
return end - begin
def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple:
_a = time()
self.bfs(__UpperCAmelCase )
_a = time()
        return end - begin
| 320 | 1 |
'''simple docstring'''
def a_ ( n : int = 1_00 ) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 0 |
'''simple docstring'''
def a_ ( n : int ) -> bool:
    # round first: a floating-point cube root such as 27 ** (1 / 3) is not exact
    val = round(n ** (1 / 3) )
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
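# Why the cube root is rounded above: floating-point cube roots are inexact, e.g.
# 27 ** (1 / 3) evaluates to 3.0000000000000004, so cubing it back would not compare
# equal to 27. Rounding to the nearest integer first makes the check exact:
#
#     round(27 ** (1 / 3)) ** 3 == 27   # True
#     round(4 ** (1 / 3)) ** 3 == 4     # False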
| 0 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class snake_case :
"""simple docstring"""
snake_case__ = 42
snake_case__ = 42
class snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCamelCase__ : int ):
UpperCAmelCase__ = [[] for _ in range(lowerCamelCase__ )]
UpperCAmelCase__ = size
def __getitem__( self : int ,lowerCamelCase__ : int ):
return iter(self._graph[vertex] )
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
return self._size
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int ):
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(lowerCamelCase__ ,lowerCamelCase__ ) )
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : int ):
UpperCAmelCase__ = deque([start_vertex] )
UpperCAmelCase__ = [None] * self.size
UpperCAmelCase__ = 0
while queue:
UpperCAmelCase__ = queue.popleft()
UpperCAmelCase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
UpperCAmelCase__ = current_distance + edge.weight
UpperCAmelCase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase__ ,lowerCamelCase__ )
and new_distance >= dest_vertex_distance
):
continue
UpperCAmelCase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
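# A short usage sketch, assuming the original names (AdjacencyList, add_edge,
# get_shortest_path) for the 0-1 BFS implementation above:
#
#     g = AdjacencyList(5)
#     g.add_edge(0, 1, 0)
#     g.add_edge(0, 2, 1)
#     g.add_edge(1, 3, 1)
#     g.add_edge(2, 4, 0)
#     print(g.get_shortest_path(0, 4))   # 1; weight-0 edges go to the front of the deque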
| 98 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ : str = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[str] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 1 |
import math
def A__ ( number: int ) -> int:
    if not isinstance(number , int ):
        raise TypeError(F'''Input value of [number={number}] must be an integer''' )
    if number < 1:
        raise ValueError(F'''Input value of [number={number}] must be > 0''' )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            # every block doubles the number of new Proth values it contributes
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1 ) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
__UpperCAmelCase : Optional[int] = 0
try:
__UpperCAmelCase : int = proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
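# Sanity check: the loop above prints the first Proth numbers
# 3, 5, 9, 13, 17, 25, 33, 41, 49, 57, i.e. values of k * 2**n + 1 with odd k < 2**n.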
| 293 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """num_attention_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self : int , A : str , A : Dict=13 , A : str=32 , A : Any=2 , A : Optional[Any]=3 , A : str=640 , A : Tuple=4 , A : Dict="silu" , A : List[Any]=3 , A : Any=32 , A : Any=0.1 , A : int=0.1 , A : Dict=0.1 , A : Optional[Any]=0.02 , A : List[Any]=True , A : Tuple=True , A : Any=10 , A : Optional[int]=None , ):
__snake_case: List[Any] = parent
__snake_case: Dict = batch_size
__snake_case: int = image_size
__snake_case: Tuple = patch_size
__snake_case: Tuple = num_channels
__snake_case: str = last_hidden_size
__snake_case: Dict = num_attention_heads
__snake_case: Dict = hidden_act
__snake_case: Tuple = conv_kernel_size
__snake_case: List[str] = output_stride
__snake_case: List[str] = hidden_dropout_prob
__snake_case: Optional[Any] = attention_probs_dropout_prob
__snake_case: int = classifier_dropout_prob
__snake_case: List[Any] = use_labels
__snake_case: Union[str, Any] = is_training
__snake_case: Union[str, Any] = num_labels
__snake_case: str = initializer_range
__snake_case: List[Any] = scope
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case: Tuple = None
__snake_case: Any = None
if self.use_labels:
__snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case: Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : str , A : Optional[Any] , A : Any , A : Any , A : Union[str, Any] ):
__snake_case: List[Any] = MobileViTModel(config=A )
model.to(A )
model.eval()
__snake_case: int = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : str , A : List[Any] , A : Any , A : Any , A : int ):
__snake_case: str = self.num_labels
__snake_case: Optional[int] = MobileViTForImageClassification(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[Any] , A : int , A : str ):
__snake_case: List[Any] = self.num_labels
__snake_case: Dict = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case: Tuple = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case: Any = config_and_inputs
__snake_case: Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = MobileViTModelTester(self )
__snake_case: str = MobileViTConfigTester(self , config_class=A , has_text_modality=A )
def UpperCAmelCase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ):
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def UpperCAmelCase__ ( self : Dict ):
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = model_class(A )
__snake_case: int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case: Optional[int] = [*signature.parameters.keys()]
__snake_case: List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ ( self : Dict ):
def check_hidden_states_output(A : List[Any] , A : int , A : Tuple ):
__snake_case: List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__snake_case: str = model(**self._prepare_for_class(A , A ) )
__snake_case: Optional[int] = outputs.hidden_states
__snake_case: Any = 5
self.assertEqual(len(A ) , A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case: Union[str, Any] = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case: Dict = True
check_hidden_states_output(A , A , A )
def UpperCAmelCase__ ( self : int ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case: List[Any] = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def A__ ( ) -> Optional[int]:
__snake_case: Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase__ ( self : Dict ):
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Tuple = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(A )
__snake_case: str = self.default_image_processor
__snake_case: Optional[Any] = prepare_img()
__snake_case: List[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
# verify the logits
__snake_case: List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A )
__snake_case: Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = model.to(A )
__snake_case: Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[Any] = prepare_img()
__snake_case: List[str] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: List[Any] = model(**A )
__snake_case: Optional[int] = outputs.logits
# verify the logits
__snake_case: Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , A )
__snake_case: Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Dict ):
__snake_case: int = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: str = model.to(A )
__snake_case: Optional[Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = prepare_img()
__snake_case: Optional[int] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
__snake_case: List[Any] = outputs.logits.detach().cpu()
__snake_case: List[str] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)] )
__snake_case: str = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , A )
__snake_case: int = image_processor.post_process_semantic_segmentation(outputs=A )
__snake_case: Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , A )
| 293 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCamelCase__ = logging.getLogger(__name__)
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=3_0_5_2_2, type=int)
UpperCamelCase__ = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, 'rb') as fp:
UpperCamelCase__ = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
UpperCamelCase__ = Counter()
for tk_ids in data:
counter.update(tk_ids)
UpperCamelCase__ = [0] * args.vocab_size
for k, v in counter.items():
UpperCamelCase__ = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 65 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase ( lowercase_ ):
@staticmethod
@abstractmethod
def a ( snake_case ):
raise NotImplementedError()
@abstractmethod
def a ( self ):
raise NotImplementedError()
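# A minimal sketch of a concrete subclass, assuming the abstract base above is the
# original BaseTransformersCLICommand (register_subcommand wires a subparser, run
# executes the command; the names below are illustrative only):
#
#     class EnvCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             sub = parser.add_parser("env")
#             sub.set_defaults(func=lambda args: EnvCommand())
#
#         def run(self):
#             print("environment info goes here")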
| 285 | 0 |
'''simple docstring'''
def __lowerCAmelCase (n = 10 ):
    if not isinstance(n , int ) or n < 0:
        raise ValueError("Invalid input" )
    modulus = 10**n
    number = 28_433 * (pow(2 , 7_830_457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''')
| 363 |
'''simple docstring'''
def __lowerCAmelCase (arr ):
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # exclude arr[i - 1]: reachability carries over from the first i - 1 items
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
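# A short usage sketch (the function above is find_min in the original source):
#
#     find_min([1, 6, 11, 5])   # 1  -> best partition {1, 5, 6} vs {11}
#     find_min([2, 2, 6])       # 2  -> best partition {2, 2} vs {6}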
| 322 | 0 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , *,
        clip_extra_context_tokens : int = 4 , clip_embeddings_dim : int = 768 , time_embed_dim : int , cross_attention_dim : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
        __magic_name__ = nn.Parameter(torch.zeros(clip_embeddings_dim ) )
        # parameters for additional clip time embeddings
        __magic_name__ = nn.Linear(clip_embeddings_dim , time_embed_dim )
        __magic_name__ = nn.Linear(clip_embeddings_dim , time_embed_dim )
        # parameters for encoder hidden states
        __magic_name__ = clip_extra_context_tokens
        __magic_name__ = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim )
        __magic_name__ = nn.Linear(clip_embeddings_dim , cross_attention_dim )
        __magic_name__ = nn.LayerNorm(cross_attention_dim )
    def _lowercase ( self : Optional[int] , *, image_embeddings : Optional[Any] , prompt_embeds : Any , text_encoder_hidden_states : str , do_classifier_free_guidance : List[str] ) -> Optional[int]:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
__magic_name__ = image_embeddings.shape[0]
__magic_name__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
__magic_name__ = classifier_free_guidance_embeddings.expand(
UpperCamelCase__ , -1 )
__magic_name__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
__magic_name__ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
__magic_name__ = self.embedding_proj(UpperCamelCase__ )
__magic_name__ = self.clip_image_embeddings_project_to_time_embeddings(UpperCamelCase__ )
__magic_name__ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
__magic_name__ = self.clip_extra_context_tokens_proj(UpperCamelCase__ )
__magic_name__ = clip_extra_context_tokens.reshape(UpperCamelCase__ , -1 , self.clip_extra_context_tokens )
__magic_name__ = clip_extra_context_tokens.permute(0 , 2 , 1 )
__magic_name__ = self.encoder_hidden_states_proj(UpperCamelCase__ )
__magic_name__ = self.text_encoder_hidden_states_norm(UpperCamelCase__ )
__magic_name__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 88 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase : List[Any] = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 99 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 343 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowercase :
_a = 42
# setable values
_a = 42
_a = 42
_a = None
@classmethod
    def a__ ( cls , common , init_noise_sigma , timesteps ) -> Tuple:
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class lowercase ( UpperCamelCase__ ):
_a = 42
class lowercase ( UpperCamelCase__,UpperCamelCase__ ):
_a = [e.name for e in FlaxKarrasDiffusionSchedulers]
_a = 42
@property
def a__ ( self ) -> Dict:
return True
@register_to_config
    def __init__( self , num_train_timesteps = 1000 , beta_start = 0.0001 , beta_end = 0.02 , beta_schedule = "linear" , trained_betas = None , variance_type = "fixed_small" , clip_sample = True , prediction_type = "epsilon" , dtype = jnp.float32 , ) -> Tuple:
        self.dtype = dtype
    def a__ ( self , common = None ) -> DDPMSchedulerState:
if common is None:
_A : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
_A : Union[str, Any] = jnp.array(1.0 , dtype=self.dtype )
_A : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def a__ ( self , state , sample , timestep = None ) -> jnp.ndarray:
        return sample
    def a__ ( self , state , num_inference_steps , shape = () ) -> DDPMSchedulerState:
        _A : Any = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        _A : Dict = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def a__ ( self , state , t , predicted_variance=None , variance_type=None ) -> Optional[int]:
_A : Optional[Any] = state.common.alphas_cumprod[t]
_A : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
_A : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
_A : Optional[Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
            _A : Optional[Any] = jnp.clip(variance , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
            _A : Any = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
elif variance_type == "fixed_large":
_A : Optional[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
_A : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
_A : str = variance
_A : Union[str, Any] = state.common.betas[t]
_A : Tuple = (predicted_variance + 1) / 2
_A : List[str] = frac * max_log + (1 - frac) * min_log
return variance
    def a__ ( self , state , model_output , timestep , sample , key = None , return_dict = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
_A : Dict = timestep
if key is None:
_A : int = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            _A , _A : List[str] = jnp.split(model_output , sample.shape[1] , axis=1 )
else:
_A : int = None
# 1. compute alphas, betas
_A : int = state.common.alphas_cumprod[t]
_A : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
_A : Union[str, Any] = 1 - alpha_prod_t
_A : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
_A : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
_A : Optional[int] = model_output
elif self.config.prediction_type == "v_prediction":
_A : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
            _A : Union[str, Any] = jnp.clip(pred_original_sample , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
_A : Dict = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_A : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
            _A : Tuple = jax.random.split(key , num=1 )
            _A : Dict = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
_A : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
_A : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def a__ ( self , state , original_samples , noise , timesteps , ) -> jnp.ndarray:
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def a__ ( self , state , sample , noise , timesteps , ) -> jnp.ndarray:
        return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
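# A minimal usage sketch, assuming the original diffusers name FlaxDDPMScheduler
# for the class above (the identifiers in this snippet are mangled):
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#     # one reverse-diffusion step, given a model prediction at timestep t:
#     # out = scheduler.step(state, model_output, t, sample, key=jax.random.PRNGKey(0))
#     # sample, state = out.prev_sample, out.state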
| 343 | 1 |
def _a ( n :int = 100 ) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 |
def _a ( n :int ) -> bool:
    # round first: a floating-point cube root such as 27 ** (1 / 3) is not exact
    val = round(n ** (1 / 3) )
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 0 | 1 |
def A ( list_data : list , length : int = 0 ) -> list:
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else A(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
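# A short usage sketch for the recursive bubble sort above (named bubble_sort in
# the original source; the definition here is called A):
#
#     print(A([4, 1, 3, 2]))   # [1, 2, 3, 4]
#     print(A([]))             # []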
| 368 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "spiece.model"}
UpperCamelCase_ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
UpperCamelCase_ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Any ="left"
def __init__( self :Optional[int] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=True , _lowercase :Union[str, Any]=False , _lowercase :Tuple="<s>" , _lowercase :Any="</s>" , _lowercase :Dict="<unk>" , _lowercase :str="<sep>" , _lowercase :Tuple="<pad>" , _lowercase :Any="<cls>" , _lowercase :List[str]="<mask>" , _lowercase :Union[str, Any]=["<eop>", "<eod>"] , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Union[str, Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
UpperCAmelCase_ = 3
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowercase)
@property
def __a ( self :int) -> List[Any]:
return len(self.sp_model)
def __a ( self :Optional[int]) -> List[Any]:
UpperCAmelCase_ = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Optional[Any]) -> List[Any]:
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __a ( self :List[str] , _lowercase :Tuple) -> Optional[int]:
if self.remove_space:
UpperCAmelCase_ = ''' '''.join(inputs.strip().split())
else:
UpperCAmelCase_ = inputs
UpperCAmelCase_ = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCAmelCase_ = unicodedata.normalize('''NFKD''' , _lowercase)
UpperCAmelCase_ = ''''''.join([c for c in outputs if not unicodedata.combining(_lowercase)])
if self.do_lower_case:
UpperCAmelCase_ = outputs.lower()
return outputs
def __a ( self :str , _lowercase :str) -> List[str]:
UpperCAmelCase_ = self.preprocess_text(_lowercase)
UpperCAmelCase_ = self.sp_model.encode(_lowercase , out_type=_lowercase)
UpperCAmelCase_ = []
for piece in pieces:
if len(_lowercase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCAmelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowercase , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCAmelCase_ = cur_pieces[1:]
else:
UpperCAmelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_lowercase)
else:
new_pieces.append(_lowercase)
return new_pieces
def __a ( self :Optional[Any] , _lowercase :Union[str, Any]) -> Tuple:
return self.sp_model.PieceToId(_lowercase)
def __a ( self :Optional[int] , _lowercase :Optional[Any]) -> List[str]:
return self.sp_model.IdToPiece(_lowercase)
def __a ( self :List[Any] , _lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = ''''''.join(_lowercase).replace(_lowercase , ''' ''').strip()
return out_string
def __a ( self :Union[str, Any] , _lowercase :List[int] , _lowercase :bool = False , _lowercase :bool = None , _lowercase :bool = True , **_lowercase :Tuple , ) -> str:
UpperCAmelCase_ = kwargs.pop('''use_source_tokenizer''' , _lowercase)
UpperCAmelCase_ = self.convert_ids_to_tokens(_lowercase , skip_special_tokens=_lowercase)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
UpperCAmelCase_ = []
sub_texts.append(_lowercase)
else:
current_sub_text.append(_lowercase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCAmelCase_ = ''''''.join(_lowercase)
UpperCAmelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase_ = self.clean_up_tokenization(_lowercase)
return clean_text
else:
return text
def __a ( self :str , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self :Dict , _lowercase :List[int] , _lowercase :Optional[List[int]] = None , _lowercase :bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase)
if token_ids_a is not None:
return ([0] * len(_lowercase)) + [1] + ([0] * len(_lowercase)) + [1, 1]
return ([0] * len(_lowercase)) + [1, 1]
def __a ( self :Optional[int] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def __a ( self :str , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_lowercase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
UpperCAmelCase_ = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowercase)
elif not os.path.isfile(self.vocab_file):
with open(_lowercase , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (out_vocab_file,)
| 344 | 0 |
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("""fixtures/test_sentencepiece.model""")
__A = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
__A = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :List[str] = CamembertTokenizer
__magic_name__ :int = CamembertTokenizerFast
__magic_name__ :Optional[Any] = True
__magic_name__ :Any = True
def snake_case ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ :Tuple = CamembertTokenizer(__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = '<pad>'
lowerCAmelCase__ :int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_4 )
def snake_case ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = CamembertTokenizer(__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
lowerCAmelCase__ :Dict = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
lowerCAmelCase__ :Dict = 'I was born in 92000, and this is falsé.'
lowerCAmelCase__ :List[str] = tokenizer.encode(__UpperCAmelCase )
lowerCAmelCase__ :Dict = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ :str = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
lowerCAmelCase__ :Tuple = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='camembert-base', revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf', sequences=sequences, )
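# Sketch of how to run this test module locally (the path follows the usual
# transformers repo layout and is assumed here; the slow integration test only
# runs when RUN_SLOW=1 is set):
#   RUN_SLOW=1 python -m pytest tests/models/camembert/test_tokenization_camembert.py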
| 293 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    """Element-wise sigmoid over the raw logits."""
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
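# Illustrative doctest-style check of the two helpers above (assumes only
# numpy): sigmoid maps 0 to 0.5, and softmax over equal logits is uniform.
#
#   >>> float(sigmoid(np.array(0.0)))
#   0.5
#   >>> softmax(np.array([[1.0, 1.0]])).tolist()
#   [[0.5, 0.5]]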
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as a default argument because we're going to use `top_k=None` in user code to declare "no top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", UserWarning, )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and returns a list when a single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
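# Minimal usage sketch of the public entry point (output values are
# illustrative; they depend on the default checkpoint):
#
#   from transformers import pipeline
#   classifier = pipeline("text-classification")
#   classifier("This movie was great!")              # [{'label': ..., 'score': ...}]
#   classifier("This movie was great!", top_k=None)  # scores for every label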
| 293 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    infos: dict


class ServeTokenizeResult(BaseModel):
    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    text: str


class ServeForwardResult(BaseModel):
    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints.")
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on", )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                "Please install transformers with [serving]: pip install \"transformers[serving]\"."
                "Or install FastAPI and uvicorn separately.")
        else:
            logger.info(f'''Serving model over {host}:{port}''')
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"], ),
                    APIRoute(
                        "/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"], ),
                    APIRoute(
                        "/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"], ),
                ], timeout=600, )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    def detokenize(self, tokens_ids: List[int] = Body(None, embed=True), skip_special_tokens: bool = Body(False, embed=True), cleanup_tokenization_spaces: bool = Body(True, embed=True), ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
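# Usage sketch (arguments are illustrative): with transformers installed with
# the [serving] extra, the command below starts the REST server exposing the
# /tokenize, /detokenize and /forward routes registered above:
#   transformers-cli serve --task text-classification --host localhost --port 8888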
| 226 |
"""simple docstring"""
import datasets
_CITATION = """\
@InProceedings{conneau2018xnli,
author = \"Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin\",
title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",
booktitle = \"Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing\",
year = \"2018\",
publisher = \"Association for Computational Linguistics\",
location = \"Brussels, Belgium\",
}
"""
_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""
_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
'accuracy': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric(\"xnli\")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
"""
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
}) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
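# Usage sketch, mirroring the docstring above:
#   xnli_metric = datasets.load_metric("xnli")
#   xnli_metric.compute(predictions=[0, 1], references=[0, 1])  # {'accuracy': 1.0}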
| 226 | 1 |
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    if not all(char in '01' for char in bin_string):
        raise ValueError('Non-binary value was passed to the function')
    if not bin_string:
        raise ValueError('Empty string was passed to the function')
    oct_string = ''
    while len(bin_string) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
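# Worked example: '1111' is left-padded to '001111', split into ['001', '111'],
# and the two groups evaluate to the octal digits 1 and 7, so
# bin_to_octal('1111') == '17' (decimal 15 is indeed octal 17).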
if __name__ == "__main__":
from doctest import testmod
testmod()
| 27 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    """Build an L-mode alpha mask that linearly fades over `overlap_pixels` at the kept borders."""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode='linear_ramp', pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new('RGB', (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0), )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    # Round n down to the nearest multiple of d.
    divisor = n % d
    return n - divisor
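# Small sanity sketch for the geometry helpers above (values illustrative):
#   clamp(12, 0, 10) == 10
#   next_divisible(10, 4) == 8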
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, low_res_scheduler: DDPMScheduler, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], max_noise_level: int = 350, ):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level, )
    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append('l')
        elif crop_rect[2] == image.size[0]:
            remove_borders.append('r')
        if y == 0:
            remove_borders.append('t')
        elif crop_rect[3] == image.size[1]:
            remove_borders.append('b')
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders), mode='L', )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask)
    @torch.no_grad()
    def __call__( self, prompt: Union[str, List[str]], image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_inference_steps: int = 75, guidance_scale: float = 9.0, noise_level: int = 50, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, tile_size: int = 128, tile_border: int = 32, original_image_slice: int = 32, ):
        final_image = Image.new('RGB', (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, )
                current_count += 1
                if callback is not None:
                    callback({'progress': current_count / total_tile_count, 'image': final_image})
        return final_image
def main():
    # Run a demo
    model_id = 'stabilityai/stable-diffusion-x4-upscaler'
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision='fp16', torch_dtype=torch.float16)
    pipe = pipe.to('cuda')
    image = Image.open('../../docs/source/imgs/diffusers_library.jpg')

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj['image'].save('diffusers_library_progress.jpg')

    final_image = pipe(image=image, prompt='Black font, white background, vector', noise_level=40, callback=callback)
    final_image.save('diffusers_library.jpg')
if __name__ == "__main__":
main()
| 322 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_biogpt'] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
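# Usage sketch: thanks to the _LazyModule indirection above,
# `from transformers.models.biogpt import BioGptModel` resolves the name via
# _import_structure, so the heavy modeling code is only imported on first access.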
| 69 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")
        # No specific FOR_XXX available yet
    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("""http://""") or audio.startswith("""https://"""):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, """rb""") as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError("""We expect a numpy ndarray as input""")
        if len(audio.shape) != 1:
            raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""")
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="""pt""")
        inputs["""candidate_labels"""] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["""text_inputs"""] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("""candidate_labels""")
        text_inputs = model_inputs.pop("""text_inputs""")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("""candidate_labels""")
        logits = model_outputs["""logits"""][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("""`tf` framework not supported.""")
        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
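# Hedged usage sketch (the audio file name is illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification")
#   classifier("dog_bark.wav", candidate_labels=["dog barking", "rain", "speech"])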
| 69 | 1 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }
    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)
    model = Pix2StructForConditionalGeneration(config)
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)
    tokenizer = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("""Model saved in {}""".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
    parser.add_argument("""--is_vqa""", action="""store_true""", help="""Convert the VQA variant of the model.""")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
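# Example invocation (script name and paths are illustrative):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base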
| 343 | from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs input_list[low:mid] and input_list[mid:high+1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of input_list using bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(""",""")]
    print(iter_merge_sort(unsorted))
| 343 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='Translation', init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        from .features import Value

        return {k: Value('string') for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='TranslationVariableLanguages', init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({'language': pa.list_(pa.string()), 'translation': pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'''Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)}).''')

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('string')),
            "translation": Sequence(Value('string')),
        }
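# Encoding sketch: multiple translations for one language are split into
# (language, text) pairs and re-sorted by language code, e.g.
#   TranslationVariableLanguages(languages=['de', 'en']).encode_example(
#       {'en': 'the cat', 'de': ['die Katze', 'der Kater']})
#   # -> {'language': ('de', 'de', 'en'),
#   #     'translation': ('der Kater', 'die Katze', 'the cat')}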
| 361 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
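# Sketch: the first Fibonacci term with 3 digits is F(12) = 144, so
# solution(3) == 12; Project Euler 25 asks for solution(1000).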
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 6 | 0 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan used once the search window is smaller than `precision`."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
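# Sketch: in [1, 3, 5, 7, 9] both variants locate 7 at index 3; because the
# window is already smaller than `precision`, they immediately fall back to
# lin_search.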
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f'Iterative search: {target} found at positions: {result_ite}')
        print(f'Recursive search: {target} found at positions: {result_rec}')
else:
print('Not found')
| 167 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a serialized Flax checkpoint file into a PyTorch model."""
    try:
        with open(model_file, """rb""") as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("""version"""):
                    raise OSError(
                        """You seem to have cloned a repository without having git-lfs installed. Please"""
                        """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the"""
                        """ folder you cloned.""")
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"""
            """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
            """ instructions.""")
        raise

    # check if we have bf16 weights
    is_type_bfloat16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bfloat16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            """Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
            """before loading those in PyTorch model.""")
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)

    pt_model.base_model_prefix = """"""

    flax_state_dict = flatten_dict(flax_state, sep=""".""")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(""".""")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["""weight"""]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("""_0""", """.0""")
                    .replace("""_1""", """.1""")
                    .replace("""_2""", """.2""")
                    .replace("""_3""", """.3""")
                    .replace("""_4""", """.4""")
                    .replace("""_5""", """.5""")
                    .replace("""_6""", """.6""")
                    .replace("""_7""", """.7""")
                    .replace("""_8""", """.8""")
                    .replace("""_9""", """.9""")
                )

        flax_key = """.""".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.")
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            """Some weights of the Flax model were not used when initializing the PyTorch model"""
            F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
            F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
            """ FlaxBertForSequenceClassification model).""")
    if len(missing_keys) > 0:
        logger.warning(
            F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            """ use it for predictions and inference.""")
    return pt_model
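# Hedged usage sketch (the model object and file name are illustrative):
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")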
| 344 | 0 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Fractional knapsack: vl = values, wt = weights, w = capacity, n = number of items."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
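# Classic worked example: frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
# takes items 1 and 2 whole plus 20/30 of item 3, returning 240.0.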
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 |
def cramers_rule_2x2(equation1, equation2):
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('''Please enter a valid equation.''')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''')

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''')
        else:
            raise ValueError('''No solution. (Inconsistent system)''')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
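# Quick sketch: x + 2y = 3 and 2x + y = 3 are both solved by x = y = 1, so
# cramers_rule_2x2((1, 2, 3), (2, 1, 3)) == (1.0, 1.0).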
| 65 | 1 |
TEXT_TO_IMAGE_PARAMS = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
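# These frozensets are meant to be consumed by the shared pipeline test mixins:
# a test class declares e.g. `params = TEXT_TO_IMAGE_PARAMS` and
# `batch_params = TEXT_TO_IMAGE_BATCH_PARAMS` so the harness knows which
# __call__ arguments to exercise and which ones accept batched inputs.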
| 226 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 226 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
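# Sketch of the mechanism: instantiating a placeholder without the "speech"
# extra installed makes requires_backends raise an import error with an
# installation hint, so e.g. ASTFeatureExtractor() fails fast at construction.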
| 371 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True, ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 156 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class MBart50Tokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs = None, **kwargs, ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get('additional_special_tokens', [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size( self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__( self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab( self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize( self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id( self, token) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self, index) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string( self, tokens) -> str:
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary( self, save_directory, filename_prefix = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def get_special_tokens_mask( self, token_ids_a, token_ids_b = None, already_has_special_tokens = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=already_has_special_tokens)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a)) + ([0] * len(token_ids_b)) + suffix_ones
    def build_inputs_with_special_tokens( self, token_ids_a, token_ids_b = None) -> List[int]:
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def _build_translation_inputs( self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs) -> BatchEncoding:
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self, src_texts, src_lang = "en_XX", tgt_texts = None, tgt_lang = "ro_RO", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode( self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode( self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens( self, src_lang) -> None:
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self, tgt_lang) -> None:
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
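# Hedged usage sketch (added for illustration; assumes network access to the
# facebook/mbart-large-50-one-to-many-mmt checkpoint documented above):
if __name__ == "__main__":
    tok = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
    batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
    # the sequence starts with the en_XX language code id and ends with </s>
    print(batch["input_ids"][0][0].item(), batch["input_ids"][0][-1].item())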
| 69 | """simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''T5Config'''
class TFMTaModel ( TFTaModel ):
    model_type = "mt5"
    config_class = MTaConfig
class TFMTaForConditionalGeneration ( TFTaForConditionalGeneration ):
    model_type = "mt5"
    config_class = MTaConfig
class TFMTaEncoderModel ( TFTaEncoderModel ):
    model_type = "mt5"
    config_class = MTaConfig
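# Hedged usage sketch (added; assumes TensorFlow is installed and the
# google/mt5-small checkpoint is reachable). The wrappers above only swap in
# MTaConfig and otherwise behave exactly like their T5 parents:
if __name__ == "__main__":
    model = TFMTaForConditionalGeneration.from_pretrained("google/mt5-small")
    print(model.config.model_type)  # "mt5"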
| 69 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
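# Hedged usage sketch (added; not part of the original __init__). Assumes the
# kakaobrain/karlo-v1-alpha weights are downloadable and a CUDA device exists:
if __name__ == "__main__":
    import torch
    pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16).to("cuda")
    image = pipe("a photo of an astronaut riding a horse").images[0]
    image.save("astronaut.png")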
| 245 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker( role_name ) -> None:
    iam_client = boto3.client('iam' )
    sagemaker_trust_policy = {
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name ,AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy ,indent=2 ) )
        policy_document = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name ,PolicyName=F'{role_name}_policy_permission' ,PolicyDocument=json.dumps(policy_document ,indent=2 ) ,)
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'role {role_name} already exists. Using existing one' )
def _get_iam_role_arn( role_name ) -> str:
    iam_client = boto3.client('iam' )
    return iam_client.get_role(RoleName=role_name )["Role"]["Arn"]
def get_sagemaker_input( ) -> SageMakerConfig:
__UpperCamelCase : Any =_ask_options(
'How do you want to authorize?' ,['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] ,a_ ,)
__UpperCamelCase : str =None
if credentials_configuration == 0:
__UpperCamelCase : str =_ask_field('Enter your AWS Profile name: [default] ' ,default='default' )
__UpperCamelCase : Optional[Any] =aws_profile
else:
print(
'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'
'`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
__UpperCamelCase : int =_ask_field('AWS Access Key ID: ' )
__UpperCamelCase : Dict =aws_access_key_id
__UpperCamelCase : Any =_ask_field('AWS Secret Access Key: ' )
__UpperCamelCase : Optional[Any] =aws_secret_access_key
__UpperCamelCase : Tuple =_ask_field('Enter your AWS Region: [us-east-1]' ,default='us-east-1' )
__UpperCamelCase : List[str] =aws_region
__UpperCamelCase : Any =_ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' ,['Provide IAM Role name', 'Create new IAM role using credentials'] ,a_ ,)
if role_management == 0:
__UpperCamelCase : Optional[Any] =_ask_field('Enter your IAM role name: ' )
else:
__UpperCamelCase : Dict ='accelerate_sagemaker_execution_role'
print(F'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
_create_iam_role_for_sagemaker(a_ )
__UpperCamelCase : List[Any] =_ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : int =None
if is_custom_docker_image:
__UpperCamelCase : List[Any] =_ask_field('Enter your Docker image: ' ,lambda a_ : str(a_ ).lower() )
__UpperCamelCase : Union[str, Any] =_ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Optional[Any] =None
if is_sagemaker_inputs_enabled:
__UpperCamelCase : Optional[Any] =_ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' ,lambda a_ : str(a_ ).lower() ,)
__UpperCamelCase : str =_ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Dict =None
if is_sagemaker_metrics_enabled:
__UpperCamelCase : Optional[Any] =_ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' ,lambda a_ : str(a_ ).lower() ,)
__UpperCamelCase : int =_ask_options(
'What is the distributed mode?' ,['No distributed training', 'Data parallelism'] ,_convert_sagemaker_distributed_mode ,)
__UpperCamelCase : int ={}
__UpperCamelCase : str =_ask_field(
'Do you wish to optimize your script with torch dynamo?[yes/NO]:' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
if use_dynamo:
__UpperCamelCase : Dict ='dynamo_'
__UpperCamelCase : Optional[int] =_ask_options(
'Which dynamo backend would you like to use?' ,[x.lower() for x in DYNAMO_BACKENDS] ,_convert_dynamo_backend ,default=2 ,)
__UpperCamelCase : Tuple =_ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
if use_custom_options:
__UpperCamelCase : List[str] =_ask_options(
'Which mode do you want to use?' ,a_ ,lambda a_ : TORCH_DYNAMO_MODES[int(a_ )] ,default='default' ,)
__UpperCamelCase : Union[str, Any] =_ask_field(
'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Tuple =_ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Tuple ='Which EC2 instance type you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
__UpperCamelCase : int =_ask_options(
a_ ,a_ ,lambda a_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(a_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCamelCase : List[str] =_ask_field(a_ ,lambda a_ : str(a_ ).lower() ,default='ml.p3.2xlarge' )
__UpperCamelCase : Union[str, Any] =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCamelCase : List[str] =_ask_field(
'How many machines do you want use? [1]: ' ,a_ ,default=1 ,)
__UpperCamelCase : Optional[Any] =_ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' ,['no', 'fp16', 'bf16', 'fp8'] ,_convert_mixed_precision ,)
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=a_ ,compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER ,distributed_type=a_ ,use_cpu=a_ ,dynamo_config=a_ ,eca_instance_type=a_ ,profile=a_ ,region=a_ ,iam_role_name=a_ ,mixed_precision=a_ ,num_machines=a_ ,sagemaker_inputs_file=a_ ,sagemaker_metrics_file=a_ ,)
| 245 | 1 |
'''simple docstring'''
def counting_sort( collection ) -> list:
    '''simple docstring'''
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string( string ) -> str:
    '''simple docstring'''
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
__snake_case = input('''Enter numbers separated by a comma:\n''').strip()
__snake_case = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted)) | 97 |
from math import ceil
def solution( n = 1001 ) -> int:
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number') | 6 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB,unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = "this is a test"
__lowerCAmelCase = "this is a test"
return input_text, output_text
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ),lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ),lowerCAmelCase__ )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],"""<pad>""" )
self.assertEqual(vocab_keys[1],"""<unk>""" )
self.assertEqual(vocab_keys[-1],"""[PAD]""" )
self.assertEqual(len(lowerCAmelCase__ ),3_00_01 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size,3_00_00 )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(lowerCAmelCase__,do_lower_case=lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = DebertaVaTokenizerFast(lowerCAmelCase__,do_lower_case=lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(lowerCAmelCase__,split_by_punct=lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = DebertaVaTokenizerFast(lowerCAmelCase__,split_by_punct=lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(lowerCAmelCase__,do_lower_case=lowerCAmelCase__,split_by_punct=lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = DebertaVaTokenizerFast(lowerCAmelCase__,do_lower_case=lowerCAmelCase__,split_by_punct=lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(lowerCAmelCase__,do_lower_case=lowerCAmelCase__,split_by_punct=lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = DebertaVaTokenizerFast(lowerCAmelCase__,do_lower_case=lowerCAmelCase__,split_by_punct=lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(lowerCAmelCase__,do_lower_case=lowerCAmelCase__,split_by_punct=lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = DebertaVaTokenizerFast(lowerCAmelCase__,do_lower_case=lowerCAmelCase__,split_by_punct=lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(lowerCAmelCase__,do_lower_case=lowerCAmelCase__,split_by_punct=lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = DebertaVaTokenizerFast(lowerCAmelCase__,do_lower_case=lowerCAmelCase__,split_by_punct=lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ ) )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = "This is a test"
__lowerCAmelCase = [13, 1, 43_98, 25, 21, 12_89]
__lowerCAmelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"]
__lowerCAmelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
__lowerCAmelCase = DebertaVaTokenizer(lowerCAmelCase__,keep_accents=lowerCAmelCase__ )
__lowerCAmelCase = DebertaVaTokenizerFast(lowerCAmelCase__,keep_accents=lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
# fmt: off
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
__lowerCAmelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.encode(lowerCAmelCase__,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__,lowerCAmelCase__ )
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = DebertaVaTokenizer(lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.encode("""sequence builders""" )
__lowerCAmelCase = tokenizer.encode("""multi-sequence build""" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__,lowerCAmelCase__ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id],lowerCAmelCase__ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],lowerCAmelCase__,)
@slow
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__,model_name="""microsoft/deberta-v2-xlarge""",revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""",)
| 362 |
'''simple docstring'''
import random
def rabin_miller( num ) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num( num ) -> bool:
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num )
def generate_large_prime( keysize = 1024 ) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
| 46 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> int:
    '''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad, model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir, metric ) -> ModelCheckpoint:
    '''simple docstring'''
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"""val_{metric}""", mode="max", save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback( metric, patience ) -> EarlyStopping:
    '''simple docstring'''
    return EarlyStopping(
        monitor=f"""val_{metric}""", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
class Seq2SeqLoggingCallback( pl.Callback ):
    def on_batch_end(self , trainer : pl.Trainer , pl_module : pl.LightningModule ) -> None:
        """simple docstring"""
        lrs = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs(self , trainer : pl.Trainer , pl_module : pl.LightningModule , type_path : str , save_generations : bool=True ) -> None:
        """simple docstring"""
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )
    @rank_zero_only
    def on_train_start(self , trainer : pl.Trainer , pl_module : pl.LightningModule ) -> None:
        """simple docstring"""
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
    @rank_zero_only
    def on_test_end(self , trainer : pl.Trainer , pl_module : pl.LightningModule ) -> None:
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )
    @rank_zero_only
    def on_validation_end(self , trainer : pl.Trainer , pl_module : pl.LightningModule ) -> None:
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
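# Hedged usage sketch (added; "my_output_dir" is a placeholder, not a path
# from this repo). Shows how the helpers above are typically wired into a
# pytorch_lightning Trainer:
if __name__ == "__main__":
    callbacks = [
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback("my_output_dir", metric="rouge2"),
        get_early_stopping_callback(metric="rouge2", patience=3),
    ]
    trainer = pl.Trainer(callbacks=callbacks, max_epochs=1)
    print(trainer.callbacks)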
| 65 | import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , 'Tatoeba directory does not exist.' )
class A ( unittest.TestCase ):
    @cached_property
    def resolver (self : Optional[int] ):
        """simple docstring"""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
    @slow
    def test_resolver (self : List[Any] ):
        """simple docstring"""
        self.resolver.convert_models(["heb-eng"] )
    @slow
    def test_model_card (self : Dict ):
        """simple docstring"""
        content , mmeta = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 65 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """ibert"""
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ])
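# Hedged usage sketch (added): constructing the config directly and inspecting
# the quantization-specific fields; no checkpoint download is needed.
if __name__ == "__main__":
    config = IBertConfig(quant_mode=True, force_dequant="gelu")
    print(config.model_type, config.quant_mode, config.force_dequant)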
| 355 | import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    '''simple docstring'''
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_m2m100_checkpoint_from_disk( checkpoint_path ):
    '''simple docstring'''
    m2m_100 = torch.load(checkpoint_path , map_location='''cpu''' )
    args = m2m_100['''args'''] or m2m_100['''cfg''']['''model''']
    state_dict = m2m_100['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = M2M100ForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 105 | 0 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs( gen_kwargs : dict) -> int:
    '''simple docstring'''
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value , list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(F'\t- key {key} has length {length}' for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ))
    max_length = max(lists_lengths.values() , default=0)
    return max(1 , max_length)
def _distribute_shards( num_shards : int , max_num_jobs : int) -> List[range]:
    '''simple docstring'''
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def _split_gen_kwargs( gen_kwargs : dict , max_num_jobs : int) -> List[dict]:
    '''simple docstring'''
    shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=shards , max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs( gen_kwargs_list : List[dict]) -> dict:
    '''simple docstring'''
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
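# Hedged demo (added, not part of the original module): distributing 10 shards
# over 3 jobs and round-tripping list-valued gen_kwargs through split/merge.
if __name__ == "__main__":
    print(_distribute_shards(num_shards=10 , max_num_jobs=3))  # [range(0, 4), range(4, 7), range(7, 10)]
    gen_kwargs = {"files": [F'shard_{i}.jsonl' for i in range(4)], "encoding": "utf-8"}
    splits = _split_gen_kwargs(gen_kwargs , max_num_jobs=2)
    print(splits)
    print(_merge_gen_kwargs(splits) == gen_kwargs)  # True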
def _shuffle_gen_kwargs( rng , gen_kwargs : dict) -> dict:
    '''simple docstring'''
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value , list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs | 232 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
__lowerCAmelCase : Any = (3, 9, -11, 0, 7, 5, 1, -1)
__lowerCAmelCase : Tuple = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node :
"""simple docstring"""
    data: int
    next_node: Node | None
class SortedLinkedList :
"""simple docstring"""
    def __init__( self : Optional[Any] , values : Iterable[int] ):
        self.head: Node | None = None
        for i in sorted(values , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__( self : str ):
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__( self : str ):
        return sum(1 for _ in self )
    def __str__( self : List[str] ):
        return " -> ".join([str(node ) for node in self] )
def merge_lists( sll_one , sll_two ) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 156 | 0 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory( _ ):
    return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand ):
    @staticmethod
    def register_subcommand( parser : ArgumentParser ):
        """simple docstring"""
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=info_command_factory )
    def run( self : List[str] ):
        """simple docstring"""
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': f"{pt_version} ({pt_cuda_available})",
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict( d : dict ):
        """simple docstring"""
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()] ) + "\n"
def is_isogram( string : str ) -> bool:
    if not all(x.isalpha() for x in string ):
        raise ValueError('String must only contain alphabetic characters.' )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
print(F"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 50 | 0 |
def is_arithmetic_series( series : list ) -> bool:
    if not isinstance(series , list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean( series : list ) -> float:
    if not isinstance(series , list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(series ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
if __name__ == "__main__":
import doctest
doctest.testmod()
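    # Hedged demo (added): quick sanity checks for the two helpers above.
    print(is_arithmetic_series([2, 4, 6]))  # True
    print(arithmetic_mean([2, 4, 6]))  # 4.0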
| 245 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        """Compute validation""" ""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
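
# Example invocation (an illustrative sketch; flags such as --output_dir and
# --do_train come from add_generic_args in lightning_base, so treat the exact
# flag names as assumptions rather than a verified CLI):
#   python run_ner.py --data_dir ./data --labels ./data/labels.txt \
#       --task_type NER --max_seq_length 128 --output_dir ./ner-out --do_train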
| 245 | 1 |
import argparse
import struct
import unittest
class SHA256:
    """Computes the SHA-256 hash of the given bytes, mirroring hashlib.sha256."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6a_09_e6_67,
0Xbb_67_ae_85,
0X3c_6e_f3_72,
0Xa5_4f_f5_3a,
0X51_0e_52_7f,
0X9b_05_68_8c,
0X1f_83_d9_ab,
0X5b_e0_cd_19,
]
# Initialize round constants
        self.round_constants = [
0X42_8a_2f_98,
0X71_37_44_91,
0Xb5_c0_fb_cf,
0Xe9_b5_db_a5,
0X39_56_c2_5b,
0X59_f1_11_f1,
0X92_3f_82_a4,
0Xab_1c_5e_d5,
0Xd8_07_aa_98,
0X12_83_5b_01,
0X24_31_85_be,
0X55_0c_7d_c3,
0X72_be_5d_74,
0X80_de_b1_fe,
0X9b_dc_06_a7,
0Xc1_9b_f1_74,
0Xe4_9b_69_c1,
0Xef_be_47_86,
0X0f_c1_9d_c6,
0X24_0c_a1_cc,
0X2d_e9_2c_6f,
0X4a_74_84_aa,
0X5c_b0_a9_dc,
0X76_f9_88_da,
0X98_3e_51_52,
0Xa8_31_c6_6d,
0Xb0_03_27_c8,
0Xbf_59_7f_c7,
0Xc6_e0_0b_f3,
0Xd5_a7_91_47,
0X06_ca_63_51,
0X14_29_29_67,
0X27_b7_0a_85,
0X2e_1b_21_38,
0X4d_2c_6d_fc,
0X53_38_0d_13,
0X65_0a_73_54,
0X76_6a_0a_bb,
0X81_c2_c9_2e,
0X92_72_2c_85,
0Xa2_bf_e8_a1,
0Xa8_1a_66_4b,
0Xc2_4b_8b_70,
0Xc7_6c_51_a3,
0Xd1_92_e8_19,
0Xd6_99_06_24,
0Xf4_0e_35_85,
0X10_6a_a0_70,
0X19_a4_c1_16,
0X1e_37_6c_08,
0X27_48_77_4c,
0X34_b0_bc_b5,
0X39_1c_0c_b3,
0X4e_d8_aa_4a,
0X5b_9c_ca_4f,
0X68_2e_6f_f3,
0X74_8f_82_ee,
0X78_a5_63_6f,
0X84_c8_78_14,
0X8c_c7_02_08,
0X90_be_ff_fa,
0Xa4_50_6c_eb,
0Xbe_f9_a3_f7,
0Xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad the message to a multiple of 64 bytes, appending the original
        # bit length as a big-endian 64-bit integer.
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        # Right-rotate a 32-bit integer by the given number of bits.
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Test class for the SHA256 class, comparing against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """
    Provides the option to hash either a string or the contents of a file,
    then prints the resulting SHA-256 digest.
    """
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
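
# A quick illustrative check (added as a sketch, mirroring the unit test above):
# with the fixes in place, SHA256(b"hello world").hash produces the same hex
# digest as hashlib.sha256(b"hello world").hexdigest().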
| 369 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep='\n')
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
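
# Example invocation (illustrative; the script location under utils/ is an
# assumption based on the repo-root comment at the top of this file):
#   python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict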
| 145 | 0 |
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    """
    Knuth-Morris-Pratt prefix function: for each position, the length of the
    longest proper prefix of the string that is also a suffix ending there.
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Returns the largest value of the prefix function for the given string."""
    return max(prefix_function(input_str))
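

# Worked example (added for illustration):
#   prefix_function("aabcdaabc") -> [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc") -> 4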
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)
    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)
    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None,
            )
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"
    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
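
# Example invocation (an illustrative sketch; --model_name_or_path, --data_dir
# and --do_train are defined by the generic argument helpers in lightning_base,
# so treat the exact flag names as assumptions):
#   python finetune.py --model_name_or_path t5-small --data_dir ./cnn_dm \
#       --output_dir ./sum-out --do_train --task summarization --n_val 500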
| 46 | 0 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 242 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of the ``torch.distributed`` communication package. During training, all
    workers initialize their own instance of the retriever, but only the main worker loads the index into memory.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        """
        Retriever initialization function, to be called from the training process. Sets common parameters and
        environment variables; only the main process in the process group loads the index into memory.
        """
        logger.info('initializing retrieval')

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('dist initialized')
            # needs to be set manually
            os.environ['GLOO_SOCKET_IFNAME'] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ['MASTER_PORT'] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend='gloo')

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('dist not initialized / main')
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('e')), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
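
# Note (added for illustration): in the distributed path above, `doc_ids` comes
# back with shape [n_queries, n_docs] and `retrieved_doc_embeds` with shape
# [n_queries, n_docs, hidden_dim]; the main worker computes both for the whole
# gathered batch and scatters per-rank chunks back to every process.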
| 242 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], 'snapshots'))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''',safety_checker=lowerCAmelCase__ )
A__ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
A__ = jax.random.PRNGKey(0 )
A__ = 4
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
A__ = replicate(lowerCAmelCase__ )
A__ = jax.random.split(lowerCAmelCase__,lowerCAmelCase__ )
A__ = shard(lowerCAmelCase__ )
A__ = pipeline(lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
assert np.abs(np.abs(lowerCAmelCase__,dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
A__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowerCAmelCase__ ) == num_samples
def UpperCamelCase ( self ):
A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''',revision='''flax''',safety_checker=lowerCAmelCase__ )
A__ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
A__ = jax.random.PRNGKey(0 )
A__ = 50
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
A__ = replicate(lowerCAmelCase__ )
A__ = jax.random.split(lowerCAmelCase__,lowerCAmelCase__ )
A__ = shard(lowerCAmelCase__ )
A__ = pipeline(lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase__,dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def UpperCamelCase ( self ):
A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''',revision='''bf16''',dtype=jnp.bfloataa,safety_checker=lowerCAmelCase__ )
A__ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
A__ = jax.random.PRNGKey(0 )
A__ = 50
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
A__ = replicate(lowerCAmelCase__ )
A__ = jax.random.split(lowerCAmelCase__,lowerCAmelCase__ )
A__ = shard(lowerCAmelCase__ )
A__ = pipeline(lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase__,dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def UpperCamelCase ( self ):
A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''',revision='''bf16''',dtype=jnp.bfloataa )
A__ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
A__ = jax.random.PRNGKey(0 )
A__ = 50
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
A__ = replicate(lowerCAmelCase__ )
A__ = jax.random.split(lowerCAmelCase__,lowerCAmelCase__ )
A__ = shard(lowerCAmelCase__ )
A__ = pipeline(lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase__,dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def UpperCamelCase ( self ):
A__ = FlaxDDIMScheduler(
beta_start=0.00085,beta_end=0.012,beta_schedule='''scaled_linear''',set_alpha_to_one=lowerCAmelCase__,steps_offset=1,)
A__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''',revision='''bf16''',dtype=jnp.bfloataa,scheduler=lowerCAmelCase__,safety_checker=lowerCAmelCase__,)
A__ = scheduler.create_state()
A__ = scheduler_state
A__ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
A__ = jax.random.PRNGKey(0 )
A__ = 50
A__ = jax.device_count()
A__ = num_samples * [prompt]
A__ = pipeline.prepare_inputs(lowerCAmelCase__ )
# shard inputs and rng
A__ = replicate(lowerCAmelCase__ )
A__ = jax.random.split(lowerCAmelCase__,lowerCAmelCase__ )
A__ = shard(lowerCAmelCase__ )
A__ = pipeline(lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,lowerCAmelCase__,jit=lowerCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:],dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
assert np.abs((np.abs(lowerCAmelCase__,dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompts = num_samples * [prompt]
        rng = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='bf16',
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompts)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, rng, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)

        slice_regular = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='bf16',
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompts)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, rng, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)

        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice_regular).max() < 1e-2
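
# Note (added for illustration): every multi-device test above follows the same
# Flax idiom: replicate() the params across devices, jax.random.split() the PRNG
# key per device, shard() the tokenized prompt batch, and then call the pipeline
# with jit=True so the sampling loop runs under pmap.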
| 193 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BPE tokenizer for HerBERT, backed by HuggingFace's `tokenizers` library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>",
                 unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs) -> None:
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token,
                         unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]

        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
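
# Illustrative usage (a sketch; downloading the pretrained files is assumed to work):
#   tok = HerbertTokenizerFast.from_pretrained('allegro/herbert-base-cased')
#   tok.build_inputs_with_special_tokens([10, 11])  # -> [cls_id, 10, 11, sep_id]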
| 105 | 0 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')
class LRUCache(Generic[T]):
    """
    Least-recently-used cache: the most recently referred key sits at the
    front of the deque, the least recently used one at the back.
    """

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        # Evict the least recently used key when a new key arrives at capacity;
        # an already-cached key is simply moved to the front.
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        # Prints all the elements in the store, most recently used first.
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
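    # referring 5 at full capacity (4) evicts the least recently used key, 2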
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 69 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
def UpperCamelCase ( self: Any , UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = VideoMAEModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] ):
"""simple docstring"""
A__ = VideoMAEForPreTraining(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A__ = torch.ones((self.num_masks,) )
A__ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
A__ = mask.expand(self.batch_size , -1 ).bool()
A__ = model(UpperCamelCase , UpperCamelCase )
# model only returns predictions for masked patches
A__ = mask.sum().item()
A__ = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
UpperCAmelCase = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = VideoMAEModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self: str , UpperCamelCase: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: Union[str, Any]=False ):
"""simple docstring"""
A__ = copy.deepcopy(UpperCamelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
A__ = torch.ones((self.model_tester.num_masks,) )
A__ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
A__ = mask.expand(self.model_tester.batch_size , -1 ).bool()
A__ = bool_masked_pos.to(UpperCamelCase )
if return_labels:
if model_class in [
*get_values(UpperCamelCase ),
]:
A__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase )
return inputs_dict
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
pass
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase )
@slow
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = VideoMAEModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = self.model_tester.seq_length - self.model_tester.num_masks
A__ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ = len(UpperCamelCase )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase: Tuple , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] ):
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
A__ = self.model_tester.seq_length - self.model_tester.num_masks
A__ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
pass
def _snake_case ( ):
A__ = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
A__ = np.load(UpperCAmelCase_ )
return list(UpperCAmelCase_ )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
UpperCamelCase )
A__ = self.default_image_processor
A__ = prepare_video()
A__ = image_processor(UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
A__ = model(**UpperCamelCase )
# verify the logits
A__ = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
A__ = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(UpperCamelCase )
A__ = self.default_image_processor
A__ = prepare_video()
A__ = image_processor(UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# add boolean mask, indicating which patches to mask
A__ = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
A__ = torch.load(UpperCamelCase )
# forward pass
with torch.no_grad():
A__ = model(**UpperCamelCase )
# verify the logits
A__ = torch.Size([1, 14_08, 15_36] )
A__ = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=UpperCamelCase )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
A__ = torch.tensor([0.5_142] , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
A__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=UpperCamelCase ).to(
UpperCamelCase )
with torch.no_grad():
A__ = model(**UpperCamelCase )
        A__ = torch.tensor([0.6_469] , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
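    # A standalone sketch of the shared pretraining mask built twice above
    # (following the tester defaults: mask_ratio * seq_length positions masked,
    # with seq_length = (image_size // patch_size) ** 2 * (num_frames // tubelet_size)):
    #
    #     num_masks = int(0.9 * seq_length)
    #     mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
    #     bool_masked_pos = mask.expand(batch_size, -1).bool()  # same mask for every video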
| 69 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ,_snake_case : int ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = TFResNetModel(config=_snake_case )
lowercase__ : List[str] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCAmelCase ( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : int ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = self.num_labels
lowercase__ : Union[str, Any] = TFResNetForImageClassification(_snake_case )
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
lowercase__ : Dict = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase : Any = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = TFResNetModelTester(self )
lowercase__ : int = ConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[int] = [*signature.parameters.keys()]
lowercase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : List[str] ,_snake_case : Optional[Any] ):
lowercase__ : str = model_class(_snake_case )
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Dict:
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
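# The shape assertions in the tests above encode ResNet's downsampling schedule.
# A small illustrative sketch of the arithmetic (hypothetical 224px input, stem
# stride 4 and per-stage strides [1, 2, 2, 2]):
def _sketch_resnet_output_size(image_size: int = 224) -> int:
    size = image_size // 4  # the stem reduces height and width by 4
    for stride in [1, 2, 2, 2]:
        size //= stride  # 56 -> 56 -> 28 -> 14 -> 7
    return size  # equals image_size // 32, matching the last_hidden_state check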
@require_tf
@require_vision
class TFResNetModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : str ) -> Any:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''tf''' )
# forward pass
lowercase__ : Dict = model(**_snake_case )
# verify the logits
lowercase__ : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() ,_snake_case ,atol=1e-4 ) )
| 16 |
def SCREAMING_SNAKE_CASE ( arr , required_sum ) -> bool:
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
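    # Usage sketch for the DP above: an all-even array can never reach an odd
    # sum, while 2 + 4 + 8 reaches 14.
    print(SCREAMING_SNAKE_CASE([2, 4, 6, 8], 5))  # False
    print(SCREAMING_SNAKE_CASE([2, 4, 6, 8], 14))  # True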
| 50 | 0 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x ):
    '''simple docstring'''
    x = tf.convert_to_tensor(x )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def _gelu_new(x ):
    '''simple docstring'''
    x = tf.convert_to_tensor(x )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def mish(x ):
    '''simple docstring'''
    x = tf.convert_to_tensor(x )
    return x * tf.tanh(tf.math.softplus(x ) )
def gelu_fast(x ):
    '''simple docstring'''
    x = tf.convert_to_tensor(x )
    coeffa = tf.cast(0.044715 , x.dtype )
    coeffb = tf.cast(0.7978845608 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x) ))
def quick_gelu(x ):
    '''simple docstring'''
    x = tf.convert_to_tensor(x )
    coeff = tf.cast(1.702 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def gelu_aa(x ):
    '''simple docstring'''
    return tf.clip_by_value(_gelu(x ) , -10 , 10 )
def glu(x , axis=-1 ):
    '''simple docstring'''
    a, b = tf.split(x , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
    def approximate_gelu_wrap(x ):
        '''simple docstring'''
        return tf.keras.activations.gelu(x , approximate=True )
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
__SCREAMING_SNAKE_CASE : List[Any] = {
'gelu': gelu,
'gelu_10': gelu_aa,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
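# A quick usage sketch of the table above, assuming the activation defs in this
# module (for reference, gelu(1.0) is roughly 0.841 and gelu(-1.0) roughly -0.159):
#
#     act = ACTaFN["gelu_fast"]
#     act(tf.constant([-1.0, 0.0, 1.0]))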
def get_tf_activation(activation_string ):
    '''simple docstring'''
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
| 356 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path ):
    '''simple docstring'''
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://" )[1]
    return dataset_path
def is_remote_filesystem(fs ) -> bool:
    '''simple docstring'''
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs , src , dst ):
    '''simple docstring'''
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
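# Usage sketch for the helpers above:
#
#     import fsspec
#     fs = fsspec.filesystem("file")
#     is_remote_filesystem(fs)                         # False for a local filesystem
#     extract_path_from_uri("s3://bucket/my_dataset")  # "bucket/my_dataset"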
def _reset_fsspec_lock() -> None:
    '''simple docstring'''
    if hasattr(fsspec.asyn , "reset_lock" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 284 | 0 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN :
def __init__( self : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str]=0.2 , UpperCamelCase : Union[str, Any]=0.2 ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = bp_numa
lowerCAmelCase__ : Tuple = bp_numa
lowerCAmelCase__ : Union[str, Any] = bp_numa
lowerCAmelCase__ : str = conva_get[:2]
lowerCAmelCase__ : Dict = conva_get[2]
lowerCAmelCase__ : Optional[Any] = size_pa
lowerCAmelCase__ : Dict = rate_w
lowerCAmelCase__ : Any = rate_t
lowerCAmelCase__ : Dict = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCAmelCase__ : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase__ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase__ : Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowerCAmelCase__ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
lowerCAmelCase__ : Any = -2 * np.random.rand(self.num_bpa ) + 1
    def save_model( self , save_path ):
"""simple docstring"""
lowerCAmelCase__ : int = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(lowerCAmelCase__ , """wb""" ) as f:
pickle.dump(lowerCAmelCase__ , lowerCAmelCase__ )
print(f"""Model saved: {save_path}""" )
@classmethod
    def ReadModel( cls , model_path ):
        """simple docstring"""
        with open(model_path , """rb""" ) as f:
            model_dic = pickle.load(f )  # noqa: S301
        conv_get = model_dic.get("""conv1""" )
        conv_get.append(model_dic.get("""step_conv1""" ) )
        size_pa = model_dic.get("""size_pooling1""" )
        bp_numa = model_dic.get("""num_bp1""" )
        rate_weight = model_dic.get("""rate_weight""" )
        rate_thre = model_dic.get("""rate_thre""" )
        # create model instance
        conv_ins = CNN(conv_get , size_pa , bp_numa , rate_weight , rate_thre )
        # modify model parameter
        conv_ins.w_conva = model_dic.get("""w_conv1""" )
        conv_ins.wkj = model_dic.get("""wkj""" )
        conv_ins.vji = model_dic.get("""vji""" )
        conv_ins.thre_conva = model_dic.get("""thre_conv1""" )
        conv_ins.thre_bpa = model_dic.get("""thre_bp2""" )
        return conv_ins
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : int ) -> Union[str, Any]:
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x ))
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
return round(lowerCAmelCase__ , 3 )
def _lowerCAmelCase ( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : int ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = convs[0]
lowerCAmelCase__ : List[Any] = convs[1]
lowerCAmelCase__ : List[Any] = np.shape(lowerCAmelCase__ )[0]
# get the data slice of original image data, data_focus
lowerCAmelCase__ : Optional[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , lowerCAmelCase__ ):
for j_focus in range(0 , size_data - size_conv + 1 , lowerCAmelCase__ ):
lowerCAmelCase__ : Dict = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCAmelCase__ )
# calculate the feature map of every single kernel, and saved as list of matrix
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : str = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCAmelCase__ ):
lowerCAmelCase__ : str = []
for i_focus in range(len(lowerCAmelCase__ ) ):
lowerCAmelCase__ : Dict = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCAmelCase__ ) )
lowerCAmelCase__ : int = np.asmatrix(lowerCAmelCase__ ).reshape(
lowerCAmelCase__ , lowerCAmelCase__ )
data_featuremap.append(lowerCAmelCase__ )
# expanding the data slice to One dimenssion
lowerCAmelCase__ : Tuple = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCAmelCase__ ) )
lowerCAmelCase__ : List[Any] = np.asarray(lowerCAmelCase__ )
return focus_list, data_featuremap
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : int="average_pool" ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Dict = len(featuremaps[0] )
lowerCAmelCase__ : str = int(size_map / size_pooling )
lowerCAmelCase__ : int = []
for i_map in range(len(lowerCAmelCase__ ) ):
lowerCAmelCase__ : str = featuremaps[i_map]
lowerCAmelCase__ : str = []
for i_focus in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
for j_focus in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCAmelCase__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCAmelCase__ ) )
lowerCAmelCase__ : Tuple = np.asmatrix(lowerCAmelCase__ ).reshape(lowerCAmelCase__ , lowerCAmelCase__ )
featuremap_pooled.append(lowerCAmelCase__ )
return featuremap_pooled
def _lowerCAmelCase ( self : int , UpperCamelCase : List[str] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = []
for i in range(len(lowerCAmelCase__ ) ):
lowerCAmelCase__ : Optional[Any] = np.shape(data[i] )
lowerCAmelCase__ : Any = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCAmelCase__ : Dict = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCAmelCase__ )
lowerCAmelCase__ : Optional[int] = np.asarray(lowerCAmelCase__ )
return data_expanded
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : str ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = np.asarray(lowerCAmelCase__ )
lowerCAmelCase__ : str = np.shape(lowerCAmelCase__ )
lowerCAmelCase__ : int = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : int ) -> int:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = []
lowerCAmelCase__ : int = 0
for i_map in range(lowerCAmelCase__ ):
lowerCAmelCase__ : Any = np.ones((size_map, size_map) )
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
for j in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ : Dict = pd_pool[
i_pool
]
lowerCAmelCase__ : Tuple = i_pool + 1
lowerCAmelCase__ : List[Any] = np.multiply(
lowerCAmelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowerCAmelCase__ )
return pd_all
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Any=bool ) -> Optional[Any]:
"""simple docstring"""
print("""----------------------Start Training-------------------------""" )
print((""" - - Shape: Train_Data """, np.shape(lowerCAmelCase__ )) )
print((""" - - Shape: Teach_Data """, np.shape(lowerCAmelCase__ )) )
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : int = []
lowerCAmelCase__ : Dict = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
lowerCAmelCase__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(lowerCAmelCase__ ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCAmelCase__ : str = np.asmatrix(datas_train[p] )
lowerCAmelCase__ : int = np.asarray(datas_teach[p] )
lowerCAmelCase__ : Optional[int] = self.convolute(
lowerCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase__ : List[Any] = self.pooling(lowerCAmelCase__ , self.size_poolinga )
lowerCAmelCase__ : Dict = np.shape(lowerCAmelCase__ )
lowerCAmelCase__ : int = self._expand(lowerCAmelCase__ )
lowerCAmelCase__ : List[Any] = data_bp_input
lowerCAmelCase__ : List[Any] = np.dot(lowerCAmelCase__ , self.vji.T ) - self.thre_bpa
lowerCAmelCase__ : Optional[Any] = self.sig(lowerCAmelCase__ )
lowerCAmelCase__ : Union[str, Any] = np.dot(lowerCAmelCase__ , self.wkj.T ) - self.thre_bpa
lowerCAmelCase__ : Optional[Any] = self.sig(lowerCAmelCase__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCAmelCase__ : Any = np.multiply(
(data_teach - bp_outa) , np.multiply(lowerCAmelCase__ , (1 - bp_outa) ) )
lowerCAmelCase__ : List[str] = np.multiply(
np.dot(lowerCAmelCase__ , self.wkj ) , np.multiply(lowerCAmelCase__ , (1 - bp_outa) ) )
lowerCAmelCase__ : List[Any] = np.dot(lowerCAmelCase__ , self.vji )
lowerCAmelCase__ : Optional[Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCAmelCase__ : int = pd_conva_pooled.T.getA().tolist()
lowerCAmelCase__ : Optional[int] = self._calculate_gradient_from_pool(
lowerCAmelCase__ , lowerCAmelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCAmelCase__ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
lowerCAmelCase__ : Tuple = self.rate_weight * np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ : Optional[int] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCAmelCase__ : List[str] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCAmelCase__ : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCAmelCase__ : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCAmelCase__ : int = self.thre_bpa - pd_k_all * self.rate_thre
lowerCAmelCase__ : str = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowerCAmelCase__ : str = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCAmelCase__ : int = rp + 1
lowerCAmelCase__ : List[str] = error_count / patterns
all_mse.append(lowerCAmelCase__ )
def draw_error():
lowerCAmelCase__ : List[str] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCAmelCase__ , """+-""" )
plt.plot(lowerCAmelCase__ , """r--""" )
plt.xlabel("""Learning Times""" )
plt.ylabel("""All_mse""" )
plt.grid(lowerCAmelCase__ , alpha=0.5 )
plt.show()
print("""------------------Training Complished---------------------""" )
print((""" - - Training epoch: """, rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def _lowerCAmelCase ( self : Any , UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(lowerCAmelCase__ )) )
for p in range(len(lowerCAmelCase__ ) ):
lowerCAmelCase__ : List[str] = np.asmatrix(datas_test[p] )
lowerCAmelCase__ : Any = self.convolute(
lowerCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase__ : Optional[int] = self.pooling(lowerCAmelCase__ , self.size_poolinga )
lowerCAmelCase__ : List[Any] = self._expand(lowerCAmelCase__ )
lowerCAmelCase__ : int = data_bp_input
lowerCAmelCase__ : str = bp_outa * self.vji.T - self.thre_bpa
lowerCAmelCase__ : Optional[Any] = self.sig(lowerCAmelCase__ )
lowerCAmelCase__ : Dict = bp_outa * self.wkj.T - self.thre_bpa
lowerCAmelCase__ : str = self.sig(lowerCAmelCase__ )
produce_out.extend(bp_outa.getA().tolist() )
lowerCAmelCase__ : str = [list(map(self.do_round , lowerCAmelCase__ ) ) for each in produce_out]
return np.asarray(lowerCAmelCase__ )
def _lowerCAmelCase ( self : Dict , UpperCamelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : int = np.asmatrix(lowerCAmelCase__ )
lowerCAmelCase__ : Union[str, Any] = self.convolute(
lowerCAmelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase__ : List[Any] = self.pooling(lowerCAmelCase__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
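    # A tiny usage sketch (hypothetical sizes): a 3x3 kernel, 2 kernels, stride 1,
    # a 2x2 pooling window, and a BP layer size of 8 units, applied to random data.
    cnn = CNN(conva_get=[3, 2, 1], size_pa=2, bp_numa=8)
    conved, pooled = cnn.convolution(np.random.rand(6, 6))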
| 242 |
'''simple docstring'''
def solution( length: int = 50 ) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(3, length + 1 ):
for block_length in range(3, row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
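# Small worked values of the recurrence above (blocks of length >= 3 separated
# by at least one empty unit, as in Project Euler problem 114):
#
#     solution(3)  ->  2    # leave the row empty, or place one block of length 3
#     solution(7)  ->  17   # the worked example from the problem statement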
if __name__ == "__main__":
    print(f'{solution() = }')
| 145 | 0 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(R'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(R'''^\s*else:''')
def find_backend(line ):
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init(fname ):
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
# If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall('''\[([^\]]+)\]''' , content )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(obj ) > 0]
            objects.extend(imports )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
    import_dict_objects = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(''', ''' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(''', ''' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 12 + '''"''' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects , type_hint_objects ):
def find_duplicates(_snake_case : Tuple ):
return [k for k, v in collections.Counter(_snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = '''base imports''' if key == '''none''' else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , '''__init__.py''' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append('''\n'''.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , '''.''' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        '''transformers''' , os.path.join(PATH_TO_TRANSFORMERS , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '''\n'''.join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
f'''{list_of_modules}\n'''
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
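    # A toy illustration of what analyze_results flags (hypothetical inputs):
    # comparing {"none": ["A", "B"]} against {"none": ["A"]} reports that "B"
    # appears in _import_structure but not in TYPE_HINT.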
| 314 |
"""simple docstring"""
def sylvester(number: int ) -> int:
    assert isinstance(number , int ), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg )
else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
return lower * upper + 1
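# First terms produced by the recurrence above (Sylvester's sequence):
#
#     sylvester(1) -> 2
#     sylvester(2) -> 3     # (2 - 1) * 2 + 1
#     sylvester(3) -> 7
#     sylvester(4) -> 43    # each term is previous * (previous - 1) + 1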
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 314 | 1 |
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image :
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type: str = field(default="Image" , init=False , repr=False )
def __call__( self : Tuple ) -> int:
"""simple docstring"""
return self.pa_type
    def encode_example( self , value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
        """simple docstring"""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support encoding images, please install 'Pillow'.""" )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def decode_example( self , value: dict , token_per_repo_id=None ) -> "PIL.Image.Image":
        """simple docstring"""
        if not self.decode:
            raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support decoding images, please install 'Pillow'.""" )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["""path"""], value["""bytes"""]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split("""::""" )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )["""repo_id"""]
                        token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        token = None
                    with xopen(path , """rb""" , use_auth_token=token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
    def cast_storage( self , storage: Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
        """simple docstring"""
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                bytes_array = storage.field("""bytes""" )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                path_array = storage.field("""path""" )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage: pa.StructArray ) -> pa.StructArray:
        """simple docstring"""
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , """rb""" ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def lowercase_ ( __UpperCAmelCase ) -> bytes:
lowerCAmelCase__ : Any = BytesIO()
if image.format in list_image_compression_formats():
lowerCAmelCase__ : int = image.format
else:
lowerCAmelCase__ : Optional[int] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(__UpperCAmelCase , format=__UpperCAmelCase )
return buffer.getvalue()
def lowercase_ ( __UpperCAmelCase ) -> dict:
if hasattr(__UpperCAmelCase , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__UpperCAmelCase )}
def lowercase_ ( __UpperCAmelCase ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
lowerCAmelCase__ : Union[str, Any] = array.dtype
lowerCAmelCase__ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
lowerCAmelCase__ : int = dtype.kind
lowerCAmelCase__ : Any = dtype.itemsize
lowerCAmelCase__ : Optional[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowerCAmelCase__ : Dict = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowerCAmelCase__ : Dict = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowerCAmelCase__ : Optional[int] = dtype_byteorder + dtype_kind + str(__UpperCAmelCase )
lowerCAmelCase__ : Dict = np.dtype(__UpperCAmelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
lowerCAmelCase__ : Optional[Any] = PIL.Image.fromarray(array.astype(__UpperCAmelCase ) )
return {"path": None, "bytes": image_to_bytes(__UpperCAmelCase )}
def lowercase_ ( __UpperCAmelCase ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = first_non_null_value(__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__UpperCAmelCase , np.ndarray ):
lowerCAmelCase__ : str = no_op_if_value_is_null(__UpperCAmelCase )
return [obj_to_image_dict_func(__UpperCAmelCase ) for obj in objs]
elif isinstance(__UpperCAmelCase , PIL.Image.Image ):
lowerCAmelCase__ : Any = no_op_if_value_is_null(__UpperCAmelCase )
return [obj_to_image_dict_func(__UpperCAmelCase ) for obj in objs]
else:
return objs
else:
return objs
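# --- Hedged usage sketch (editor-added, self-contained) ---
# Round-trip of the {"path", "bytes"} encoding the helpers above implement:
# serialize a uint8 array as PNG bytes, then decode it the way the feature does.
if __name__ == "__main__":
    from io import BytesIO
    import numpy as np
    import PIL.Image
    arr = np.zeros((16, 16, 3), dtype=np.uint8)  # uint8 is the only dtype accepted for multi-channel arrays above
    buf = BytesIO()
    PIL.Image.fromarray(arr).save(buf, format="PNG")  # PNG mirrors the RGB fallback above
    encoded = {"path": None, "bytes": buf.getvalue()}
    decoded = PIL.Image.open(BytesIO(encoded["bytes"]))
    decoded.load()  # eager load, as the decoder above does to avoid "Too many open files"
    assert decoded.size == (16, 16)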
| 242 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCamelCase ( a_ ):
_lowerCamelCase :Any = "Salesforce/blip-image-captioning-base"
_lowerCamelCase :int = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
_lowerCamelCase :List[Any] = "image_captioner"
_lowerCamelCase :Tuple = AutoModelForVisionaSeq
_lowerCamelCase :Dict = ["image"]
_lowerCamelCase :str = ["text"]
def __init__( self : Dict , *UpperCamelCase : Any , **UpperCamelCase : Any ) -> Any:
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*UpperCamelCase , **UpperCamelCase )
def _lowerCAmelCase ( self : Any , UpperCamelCase : "Image" ) -> Union[str, Any]:
"""simple docstring"""
return self.pre_processor(images=UpperCamelCase , return_tensors="""pt""" )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str ) -> Tuple:
"""simple docstring"""
return self.model.generate(**UpperCamelCase )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return self.pre_processor.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )[0].strip()
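# --- Hedged usage note (editor-added) ---
# Upstream this class is transformers' ImageCaptioningTool; a PipelineTool is
# simply called on its input, which chains the encode/forward/decode methods above:
#   tool = ImageCaptioningTool()                  # upstream name assumed
#   caption = tool(PIL.Image.open("photo.jpg"))   # "photo.jpg" is a hypothetical path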
| 242 | 1 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : Optional[Any] = DDIMPipeline
a__ : str = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
a__ : Any = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
a__ : str = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
a__ : Optional[Any] = False
def UpperCamelCase__ ( self) -> Tuple:
torch.manual_seed(0)
__UpperCamelCase :Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
__UpperCamelCase :int = DDIMScheduler()
__UpperCamelCase :Any = {'''unet''': unet, '''scheduler''': scheduler}
return components
def UpperCamelCase__ ( self , __lowercase , __lowercase=0) -> Optional[Any]:
if str(__lowercase).startswith('''mps'''):
__UpperCamelCase :List[Any] = torch.manual_seed(__lowercase)
else:
__UpperCamelCase :List[str] = torch.Generator(device=__lowercase).manual_seed(__lowercase)
__UpperCamelCase :List[Any] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :List[Any] = '''cpu'''
__UpperCamelCase :int = self.get_dummy_components()
__UpperCamelCase :List[Any] = self.pipeline_class(**__lowercase)
pipe.to(__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Any = self.get_dummy_inputs(__lowercase)
__UpperCamelCase :Union[str, Any] = pipe(**__lowercase).images
__UpperCamelCase :List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3))
__UpperCamelCase :int = np.array(
[1.0_0_0E0_0, 5.7_1_7E-0_1, 4.7_1_7E-0_1, 1.0_0_0E0_0, 0.0_0_0E0_0, 1.0_0_0E0_0, 3.0_0_0E-0_4, 0.0_0_0E0_0, 9.0_0_0E-0_4])
__UpperCamelCase :str = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(__lowercase , 1E-3)
def UpperCamelCase__ ( self) -> List[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
def UpperCamelCase__ ( self) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3E-3)
def UpperCamelCase__ ( self) -> Tuple:
super().test_save_load_optional_components(expected_max_difference=3E-3)
def UpperCamelCase__ ( self) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[str] = '''google/ddpm-cifar10-32'''
__UpperCamelCase :Optional[Any] = UNetaDModel.from_pretrained(__lowercase)
__UpperCamelCase :Dict = DDIMScheduler()
__UpperCamelCase :Optional[Any] = DDIMPipeline(unet=__lowercase , scheduler=__lowercase)
ddim.to(__lowercase)
ddim.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :List[str] = torch.manual_seed(0)
__UpperCamelCase :Optional[int] = ddim(generator=__lowercase , eta=0.0 , output_type='''numpy''').images
__UpperCamelCase :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase :Union[str, Any] = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[int] = '''google/ddpm-ema-bedroom-256'''
__UpperCamelCase :str = UNetaDModel.from_pretrained(__lowercase)
__UpperCamelCase :int = DDIMScheduler.from_pretrained(__lowercase)
__UpperCamelCase :Any = DDIMPipeline(unet=__lowercase , scheduler=__lowercase)
ddpm.to(__lowercase)
ddpm.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :Dict = torch.manual_seed(0)
__UpperCamelCase :Any = ddpm(generator=__lowercase , output_type='''numpy''').images
__UpperCamelCase :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCamelCase :Dict = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
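# --- Hedged usage sketch (editor-added; `UNetaDModel` above is UNet2DModel upstream) ---
if __name__ == "__main__":
    import torch
    from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
    unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
    pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
    images = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images
    print(images[0].shape)  # (32, 32, 3); eta=0.0 keeps DDIM sampling deterministic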
| 367 |
from __future__ import annotations
from math import pi
def lowerCamelCase ( inductance , frequency , reactance ):  # parameter names restored: the body below already uses them
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
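    # --- Hedged usage sketch (editor-added) ---
    # solving X_L = 2*pi*f*L for whichever quantity is passed as 0
    print(lowerCamelCase(inductance=0, frequency=50, reactance=100))   # -> {'inductance': ...}
    print(lowerCamelCase(inductance=0.2, frequency=0, reactance=100))  # -> {'frequency': ...}
    print(lowerCamelCase(inductance=0.2, frequency=50, reactance=0))   # -> {'reactance': ...}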
| 105 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    # identifiers restored from the call sites below (fire.Fire expects `download_wmt_dataset`)
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('run pip install datasets')
    pair = f'{src_lang}-{tgt_lang}'
    print(f'Converting {dataset}-{pair}')
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f'{dataset}-{pair}'
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f'Splitting {split} with {ds[split].num_rows} records')
        # to save to val.source, val.target like summary datasets
        fn = 'val' if split == 'validation' else split
        src_path = save_dir.joinpath(f'{fn}.source')
        tgt_path = save_dir.joinpath(f'{fn}.target')
        src_fp = src_path.open('w+')
        tgt_fp = tgt_path.open('w+')
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x['translation']
            src_fp.write(ex[src_lang] + '\n')
            tgt_fp.write(ex[tgt_lang] + '\n')
    print(f'Saved {dataset} dataset to {save_dir}')
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
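# --- Hedged usage note (editor-added) ---
# fire exposes the function's arguments as CLI flags, so a typical invocation is:
#   python download_wmt_dataset.py --src_lang ro --tgt_lang en --dataset wmt16
# which writes {train,val,test}.source / .target files under the save directory.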
| 69 | """simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__UpperCamelCase = 0
__UpperCamelCase = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__UpperCamelCase = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__UpperCamelCase = tuple[int, int]
class UpperCamelCase :
def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> None:
snake_case_ = pos_x
snake_case_ = pos_y
snake_case_ = (pos_y, pos_x)
snake_case_ = goal_x
snake_case_ = goal_y
snake_case_ = g_cost
snake_case_ = parent
snake_case_ = self.calculate_heuristic()
snake_case_ = self.g_cost + self.h_cost
def a_ ( self) -> float:
snake_case_ = self.pos_x - self.goal_x
snake_case_ = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCAmelCase__) + abs(lowerCAmelCase__)
else:
return sqrt(dy**2 + dx**2)
def __lt__( self, lowerCAmelCase__) -> bool:
return self.f_cost < other.f_cost
class UpperCamelCase :
def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = Node(start[1], start[0], goal[1], goal[0], 0, lowerCAmelCase__)
snake_case_ = Node(goal[1], goal[0], goal[1], goal[0], 9_9999, lowerCAmelCase__)
snake_case_ = [self.start]
snake_case_ = []
snake_case_ = False
def a_ ( self) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
snake_case_ = self.open_nodes.pop(0)
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCAmelCase__)
self.closed_nodes.append(lowerCAmelCase__)
snake_case_ = self.get_successors(lowerCAmelCase__)
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase__)
else:
# retrieve the best current path
snake_case_ = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase__))
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase__)
else:
self.open_nodes.append(lowerCAmelCase__)
return [self.start.pos]
def a_ ( self, lowerCAmelCase__) -> list[Node]:
snake_case_ = []
for action in delta:
snake_case_ = parent.pos_x + action[1]
snake_case_ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(lowerCAmelCase__) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase__, lowerCAmelCase__, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, lowerCAmelCase__, ))
return successors
def a_ ( self, lowerCAmelCase__) -> list[TPosition]:
snake_case_ = node
snake_case_ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
snake_case_ = current_node.parent
path.reverse()
return path
class UpperCamelCase :
def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> None:
snake_case_ = AStar(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = AStar(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = False
def a_ ( self) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
snake_case_ = self.fwd_astar.open_nodes.pop(0)
snake_case_ = self.bwd_astar.open_nodes.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCAmelCase__, lowerCAmelCase__)
self.fwd_astar.closed_nodes.append(lowerCAmelCase__)
self.bwd_astar.closed_nodes.append(lowerCAmelCase__)
snake_case_ = current_bwd_node
snake_case_ = current_fwd_node
snake_case_ = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase__),
self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase__),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCAmelCase__)
else:
# retrieve the best current path
snake_case_ = astar.open_nodes.pop(
astar.open_nodes.index(lowerCAmelCase__))
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCAmelCase__)
else:
astar.open_nodes.append(lowerCAmelCase__)
return [self.fwd_astar.start.pos]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> list[TPosition]:
snake_case_ = self.fwd_astar.retrace_path(lowerCAmelCase__)
snake_case_ = self.bwd_astar.retrace_path(lowerCAmelCase__)
bwd_path.pop()
bwd_path.reverse()
snake_case_ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCamelCase = (0, 0)
__UpperCamelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__UpperCamelCase = time.time()
__UpperCamelCase = AStar(init, goal)
__UpperCamelCase = a_star.search()
__UpperCamelCase = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
__UpperCamelCase = time.time()
    __UpperCamelCase = BidirectionalAStar(init, goal)
    __UpperCamelCase = bd_a_star.search()  # run the search so the timing below measures pathfinding, not just construction ('bd_a_star' assumed, matching the 'a_star.search()' pattern above)
    __UpperCamelCase = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 69 | 1 |
'''simple docstring'''
def solution(n: int = 1000000) -> int:
    # identifiers restored so the `solution(...)` call in the __main__ guard below resolves
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2, n):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
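# --- Hedged usage note (editor-added) ---
# Example: solution(100) returns 97, whose Collatz chain (97 -> 292 -> ... -> 1)
# is the longest of any starting number below 100; the `counters` memo above is
# what keeps the full run to 1_000_000 fast.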
if __name__ == "__main__":
print(solution(int(input().strip())))
| 129 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _lowercase ( unittest.TestCase ):
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Any = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
lowerCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase__ : Union[str, Any] = {
"""do_resize""": True,
"""size""": {"""height""": 224, """width""": 224},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"""do_convert_rgb""": True,
}
lowerCamelCase__ : Optional[int] = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: List[Any] , **UpperCamelCase__: Optional[int] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Any , **UpperCamelCase__: List[str] ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: str , **UpperCamelCase__: int ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase__ : List[str] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Tuple = self.get_tokenizer()
lowerCamelCase__ : Any = self.get_rust_tokenizer()
lowerCamelCase__ : str = self.get_image_processor()
lowerCamelCase__ : str = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase__ : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
lowerCamelCase__ : Any = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase__ : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : Optional[int] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ : str = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
lowerCamelCase__ : Optional[int] = self.get_image_processor(do_normalize=UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=UpperCamelCase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Optional[Any] = self.get_image_processor()
lowerCamelCase__ : Optional[int] = self.get_tokenizer()
lowerCamelCase__ : int = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Tuple = self.prepare_image_inputs()
lowerCamelCase__ : str = image_processor(UpperCamelCase__ , return_tensors="""np""" )
lowerCamelCase__ : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : int = self.get_image_processor()
lowerCamelCase__ : Optional[int] = self.get_tokenizer()
lowerCamelCase__ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase__ : Optional[Any] = processor(text=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Optional[int] = self.get_image_processor()
lowerCamelCase__ : List[Any] = self.get_tokenizer()
lowerCamelCase__ : Optional[Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Dict = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase__ : Any = self.prepare_image_inputs()
lowerCamelCase__ : Optional[int] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : Any = self.get_tokenizer()
lowerCamelCase__ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase__ : Any = processor.batch_decode(UpperCamelCase__ )
lowerCamelCase__ : int = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Dict = self.get_image_processor()
lowerCamelCase__ : int = self.get_tokenizer()
lowerCamelCase__ : Optional[Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = """Alexandra,T-shirt的价格是15便士。"""
lowerCamelCase__ : Tuple = self.prepare_image_inputs()
lowerCamelCase__ : str = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
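# --- Hedged usage sketch (editor-added; the checkpoint name is an example) ---
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import ChineseCLIPProcessor
    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
    print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids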
| 129 | 1 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[Any] = 'laion/clap-htsat-unfused'
_lowercase : Optional[Any] = tempfile.mkdtemp()
def UpperCamelCase ( self, **lowerCamelCase) -> List[Any]:
"""simple docstring"""
return RobertaTokenizer.from_pretrained(self.checkpoint, **lowerCamelCase)
def UpperCamelCase ( self, **lowerCamelCase) -> str:
"""simple docstring"""
return ClapFeatureExtractor.from_pretrained(self.checkpoint, **lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.get_tokenizer()
_lowercase : Optional[int] = self.get_feature_extractor()
_lowercase : int = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowercase : Union[str, Any] = ClapProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, lowerCamelCase)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[int] = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
processor.save_pretrained(self.tmpdirname)
_lowercase : str = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
_lowercase : List[str] = self.get_feature_extractor(do_normalize=lowerCamelCase, padding_value=1.0)
_lowercase : Tuple = ClapProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCamelCase, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, lowerCamelCase)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Union[str, Any] = self.get_feature_extractor()
_lowercase : List[Any] = self.get_tokenizer()
_lowercase : Dict = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase)
_lowercase : Union[str, Any] = floats_list((3, 10_00))
_lowercase : str = feature_extractor(lowerCamelCase, return_tensors='np')
_lowercase : Tuple = processor(audios=lowerCamelCase, return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = self.get_feature_extractor()
_lowercase : Any = self.get_tokenizer()
_lowercase : Dict = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase)
_lowercase : Any = 'This is a test string'
_lowercase : List[Any] = processor(text=lowerCamelCase)
_lowercase : Dict = tokenizer(lowerCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = self.get_feature_extractor()
_lowercase : str = self.get_tokenizer()
_lowercase : List[str] = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase)
_lowercase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowercase : str = processor.batch_decode(lowerCamelCase)
_lowercase : Any = tokenizer.batch_decode(lowerCamelCase)
self.assertListEqual(lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = self.get_feature_extractor()
_lowercase : Any = self.get_tokenizer()
_lowercase : Tuple = ClapProcessor(tokenizer=lowerCamelCase, feature_extractor=lowerCamelCase)
self.assertListEqual(
processor.model_input_names[2:], feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )
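# --- Hedged usage sketch (editor-added) ---
if __name__ == "__main__":
    import numpy as np
    from transformers import ClapProcessor
    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at CLAP's 48 kHz
    inputs = processor(audios=audio, sampling_rate=48_000, return_tensors="pt")
    print(inputs["input_features"].shape)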
| 21 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : str, lowerCAmelCase_ : Optional[int]=None, lowerCAmelCase_ : List[Any]=None ):
if attention_mask is None:
__lowerCAmelCase = tf.cast(tf.math.not_equal(lowerCAmelCase_, config.pad_token_id ), tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _UpperCAmelCase :
"""simple docstring"""
a_ = OPTConfig
a_ = {}
a_ = """gelu"""
def __init__( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=1_3 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Any=9_9 , lowerCAmelCase_ : Any=1_6 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Tuple=2_0 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : Optional[int]=1_6 , lowerCAmelCase_ : Dict=1_6 , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = embed_dim
__lowerCAmelCase = word_embed_proj_dim
__lowerCAmelCase = False
def lowercase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase_ , **self.config_updates , )
__lowerCAmelCase = prepare_opt_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def lowercase ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]:
__lowerCAmelCase = TFOPTModel(config=lowerCAmelCase_ )
__lowerCAmelCase = inputs_dict['input_ids']
__lowerCAmelCase = input_ids[:1, :]
__lowerCAmelCase = inputs_dict['attention_mask'][:1, :]
__lowerCAmelCase = 1
# first forward pass
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
__lowerCAmelCase , __lowerCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
__lowerCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-3 )
@require_tf
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
a_ = (TFOPTForCausalLM,) if is_tf_available() else ()
a_ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
a_ = False
a_ = False
a_ = False
a_ = 10
def lowercase ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = TFOPTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] ):
if hasattr(lowerCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowerCAmelCase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
__lowerCAmelCase = model_class(config=lowerCAmelCase_ )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase_ )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
__lowerCAmelCase = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowerCAmelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase_ )
# check that weights remain the same after resizing
__lowerCAmelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase = False
self.assertTrue(lowerCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase_ )
__lowerCAmelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowerCAmelCase = False
self.assertTrue(lowerCAmelCase_ )
def a_ ( lowerCAmelCase_ : Union[str, Any] ):
return tf.constant(lowerCAmelCase_, dtype=tf.intaa )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
a_ = 99
def lowercase ( self : Optional[int] ) -> Any:
__lowerCAmelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowerCAmelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowerCAmelCase = input_ids.shape[0]
__lowerCAmelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : str ) -> List[str]:
__lowerCAmelCase = TFOPTModel.from_pretrained('facebook/opt-350m' )
__lowerCAmelCase = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase = tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
__lowerCAmelCase = model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
__lowerCAmelCase = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-3 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-2 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : int ) -> Dict:
super().setUp()
__lowerCAmelCase = 'facebook/opt-350m'
def lowercase ( self : Dict ) -> Any:
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowerCAmelCase = GPTaTokenizer.from_pretrained(self.path_model )
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowerCAmelCase = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
__lowerCAmelCase = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
__lowerCAmelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase ( self : Optional[int] ) -> int:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowercase ( self : int ) -> str:
__lowerCAmelCase = 'facebook/opt-125m'
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
__lowerCAmelCase = []
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : Optional[Any] ) -> str:
__lowerCAmelCase = 'facebook/opt-350m'
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = 'left'
# use different length sentences to test batching
__lowerCAmelCase = [
'Hello, my dog is a little',
'Today, I',
]
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' , padding=lowerCAmelCase_ )
__lowerCAmelCase = inputs['input_ids']
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs['attention_mask'] )
__lowerCAmelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ )
__lowerCAmelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
__lowerCAmelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
def lowercase ( self : List[Any] ) -> List[Any]:
__lowerCAmelCase = 'facebook/opt-350m'
__lowerCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
__lowerCAmelCase = []
__lowerCAmelCase = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
__lowerCAmelCase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , return_tensors='tf' ).input_ids
__lowerCAmelCase = model.generate(lowerCAmelCase_ , max_length=1_0 )
__lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
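# --- Hedged usage sketch (editor-added; `GPTaTokenizer` above is GPT2Tokenizer upstream) ---
if __name__ == "__main__":
    from transformers import GPT2Tokenizer, TFOPTForCausalLM
    tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
    model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
    ids = tokenizer("Today is a beautiful day and I want to", return_tensors="tf").input_ids
    out = model.generate(ids, max_length=10)  # same budget the greedy-generation test above uses
    print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])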
| 284 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( _SCREAMING_SNAKE_CASE ,unittest.TestCase ):
lowercase_ = LayoutLMTokenizer
lowercase_ = LayoutLMTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
_a = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : Optional[int] , **lowerCAmelCase_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a = """UNwant\u00E9d,running"""
_a = """unwanted, running"""
return input_text, output_text
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(lowerCAmelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [7, 4, 5, 10, 8, 9] )
def __lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
pass
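# --- Hedged note (editor-added) ---
# With the toy vocab above, WordPiece behaviour is easy to trace by hand:
# "UNwant\u00E9d,running" is lowercased and accent-stripped to "unwanted,running",
# then greedily split into un ##want ##ed , runn ##ing -> ids [7, 4, 5, 10, 8, 9],
# which is exactly what the full-tokenizer test above asserts.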
| 351 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case : Dict = logging.get_logger(__name__)
_snake_case : Optional[Any] = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class A ( _a ,_a ):
lowercase_ = 'nat'
lowercase_ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , lowerCAmelCase_ : str=4 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : List[Any]=64 , lowerCAmelCase_ : Dict=[3, 4, 6, 5] , lowerCAmelCase_ : Dict=[2, 4, 8, 16] , lowerCAmelCase_ : str=7 , lowerCAmelCase_ : Dict=3.0 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : str=1e-5 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : str=None , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : List[Any] , ) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_a = patch_size
_a = num_channels
_a = embed_dim
_a = depths
_a = len(lowerCAmelCase_ )
_a = num_heads
_a = kernel_size
_a = mlp_ratio
_a = qkv_bias
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = drop_path_rate
_a = hidden_act
_a = layer_norm_eps
_a = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_a = int(embed_dim * 2 ** (len(lowerCAmelCase_ ) - 1) )
_a = layer_scale_init_value
_a = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
_a , _a = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
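# --- Hedged usage sketch (editor-added; the class above is NatConfig upstream) ---
if __name__ == "__main__":
    from transformers import NatConfig
    cfg = NatConfig()
    print(cfg.hidden_size)  # embed_dim * 2**(len(depths) - 1) = 64 * 8 = 512 with the defaults above
    print(cfg.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']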
| 179 | 0 |
from typing import Dict
from .base import GenericTensor, Pipeline
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
def lowercase_ ( self : Tuple , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : Dict ) -> Any:
if tokenize_kwargs is None:
SCREAMING_SNAKE_CASE__ = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
SCREAMING_SNAKE_CASE__ = truncation
SCREAMING_SNAKE_CASE__ = tokenize_kwargs
SCREAMING_SNAKE_CASE__ = {}
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ = return_tensors
return preprocess_params, {}, postprocess_params
def lowercase_ ( self : int , __lowerCamelCase : Optional[int] , **__lowerCamelCase : Optional[int] ) -> Dict[str, GenericTensor]:
SCREAMING_SNAKE_CASE__ = self.framework
SCREAMING_SNAKE_CASE__ = self.tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
return model_inputs
def lowercase_ ( self : Dict , __lowerCamelCase : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = self.model(**__lowerCamelCase )
return model_outputs
def lowercase_ ( self : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Tuple=False ) -> Optional[Any]:
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Union[str, Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Tuple ) -> List[str]:
return super().__call__(*__lowerCamelCase , **__lowerCamelCase )
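# --- Hedged usage sketch (editor-added; the model name is an example) ---
if __name__ == "__main__":
    from transformers import pipeline
    extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
    features = extractor("Transformers is great!")
    print(len(features[0]), len(features[0][0]))  # num_tokens x hidden_size (768 for DistilBERT)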
| 314 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_SCREAMING_SNAKE_CASE : Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_SCREAMING_SNAKE_CASE : Any = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_A )[0]
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_51:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(rows * cols * num_images )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
SCREAMING_SNAKE_CASE__ = data.reshape(_A , _A , _A , 1 )
return data
@deprecated(_A , '''Please use tf.one_hot on tensors.''' )
def UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = labels_dense.shape[0]
SCREAMING_SNAKE_CASE__ = numpy.arange(_A ) * num_classes
SCREAMING_SNAKE_CASE__ = numpy.zeros((num_labels, num_classes) )
SCREAMING_SNAKE_CASE__ = 1
return labels_one_hot
@deprecated(_A , '''Please use tf.data to implement this functionality.''' )
def UpperCAmelCase_ ( _A , _A=False , _A=10 ):
'''simple docstring'''
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_A ) as bytestream:
SCREAMING_SNAKE_CASE__ = _readaa(_A )
if magic != 20_49:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
SCREAMING_SNAKE_CASE__ = _readaa(_A )
SCREAMING_SNAKE_CASE__ = bytestream.read(_A )
SCREAMING_SNAKE_CASE__ = numpy.frombuffer(_A , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_A , _A )
return labels
class UpperCAmelCase__ :
"""simple docstring"""
@deprecated(
__lowerCamelCase , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict=False , __lowerCamelCase : Dict=False , __lowerCamelCase : List[str]=dtypes.floataa , __lowerCamelCase : List[str]=True , __lowerCamelCase : Any=None , ) -> List[Any]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = random_seed.get_seed(__lowerCamelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
SCREAMING_SNAKE_CASE__ = dtypes.as_dtype(__lowerCamelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
SCREAMING_SNAKE_CASE__ = 1_0000
SCREAMING_SNAKE_CASE__ = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
SCREAMING_SNAKE_CASE__ = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
SCREAMING_SNAKE_CASE__ = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
SCREAMING_SNAKE_CASE__ = images.astype(numpy.floataa )
SCREAMING_SNAKE_CASE__ = numpy.multiply(__lowerCamelCase , 1.0 / 255.0 )
SCREAMING_SNAKE_CASE__ = images
SCREAMING_SNAKE_CASE__ = labels
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
@property
def lowercase_ ( self : Tuple ) -> List[str]:
return self._images
@property
def lowercase_ ( self : List[Any] ) -> Tuple:
return self._labels
@property
def lowercase_ ( self : Tuple ) -> Tuple:
return self._num_examples
@property
def lowercase_ ( self : Optional[int] ) -> int:
return self._epochs_completed
def lowercase_ ( self : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Union[str, Any]=True ) -> str:
if fake_data:
SCREAMING_SNAKE_CASE__ = [1] * 784
SCREAMING_SNAKE_CASE__ = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__lowerCamelCase )],
[fake_label for _ in range(__lowerCamelCase )],
)
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perma]
SCREAMING_SNAKE_CASE__ = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
SCREAMING_SNAKE_CASE__ = self._num_examples - start
SCREAMING_SNAKE_CASE__ = self._images[start : self._num_examples]
SCREAMING_SNAKE_CASE__ = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
SCREAMING_SNAKE_CASE__ = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.images[perm]
SCREAMING_SNAKE_CASE__ = self.labels[perm]
# Start next epoch
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = batch_size - rest_num_examples
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
SCREAMING_SNAKE_CASE__ = self._images[start:end]
SCREAMING_SNAKE_CASE__ = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
SCREAMING_SNAKE_CASE__ = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    '''simple docstring'''
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    '''simple docstring'''
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
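# Minimal usage sketch for the reader above. `fake_data=True` avoids any network
# access; assumes `_Datasets` exposes a `.train` field as in the upstream TF module.
if __name__ == "__main__":
    mnist = read_data_sets("/tmp/mnist_data", fake_data=True, one_hot=True)
    batch_xs, batch_ys = mnist.train.next_batch(2, fake_data=True)
    print(len(batch_xs), len(batch_xs[0]))  # -> 2 784 (two flattened fake images)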
| 314 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        """simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """simple docstring"""
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """simple docstring"""
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
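# Usage sketch of the processor exercised above (needs network access to fetch the
# checkpoint, like the slow tests; the expected shape mirrors the assertions there):
if __name__ == "__main__":
    processor = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
    sample = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    print(processor(images=sample, return_tensors='pt').pixel_values.shape)  # -> torch.Size([1, 3, 800, 1066])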
| 88 |
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""")
    print('Average turn around time =', total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
_SCREAMING_SNAKE_CASE = int(input())
_SCREAMING_SNAKE_CASE = [0] * no_of_processes
_SCREAMING_SNAKE_CASE = [0] * no_of_processes
_SCREAMING_SNAKE_CASE = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = map(int, input().split())
_SCREAMING_SNAKE_CASE = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_SCREAMING_SNAKE_CASE = burst_time
_SCREAMING_SNAKE_CASE = no_of_processes
_SCREAMING_SNAKE_CASE = waiting_time
_SCREAMING_SNAKE_CASE = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
_SCREAMING_SNAKE_CASE = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
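# Non-interactive demo (sketch) of the scheduler above on a fixed workload.
# With arrivals [0, 1, 2] and bursts [5, 2, 1], SRTF gives average waiting
# time 1.33333 and average turn around time 4.0.
def _srtf_demo():
    arrival = [0, 1, 2]
    burst = [5, 2, 1]
    wait = calculate_waitingtime(arrival, burst, 3)
    tat = calculate_turnaroundtime(burst, 3, wait)
    calculate_average_times(wait, tat, 3)

# _srtf_demo()  # uncomment to run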
| 88 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
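# Usage note (sketch): with the lazy module installed in sys.modules, importing the
# package stays cheap and the heavy torch/TF modules above load on first access, e.g.:
#   from transformers.models.mobilebert import MobileBertConfig  # no torch import yet
#   from transformers.models.mobilebert import MobileBertModel   # triggers the torch import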
| 231 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    '''simple docstring'''
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    '''simple docstring'''
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    '''simple docstring'''
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(ctbi, i):  # name per the reference implementation
    '''simple docstring'''
    pass  # Put your code here...


def random_letters(ctbi, i):
    '''simple docstring'''
    pass  # Put your code here...


def random_characters(ctbi, i):
    '''simple docstring'''
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    '''simple docstring'''
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase
    # numbers, and special characters
def main() -> None:
    '''simple docstring'''
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
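# Prompt-free demo (sketch) using only the functions defined above:
def _password_demo() -> None:
    pw = password_generator(12)
    print(pw, "strong" if is_strong_password(pw) else "weak")

# _password_demo()  # uncomment to try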
| 105 | 0 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """simple docstring"""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """simple docstring"""
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    """simple docstring"""
    z, _, _ = model.encode(x)
    print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """simple docstring"""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """simple docstring"""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """simple docstring"""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """simple docstring"""
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"""loaded model from global step {global_step}.""")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
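# End-to-end sketch (assumes the default checkpoint/config paths above exist locally
# and a CUDA device is available):
# import torch
# device = torch.device("cuda")
# vqgan = load_vqgan(device)
# x = torch.randn(1, 3, 256, 256, device=device)  # stand-in for a preprocessed image
# x_rec = reconstruct_with_vqgan(x, vqgan)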
| 87 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    """simple docstring"""
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"""/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch""")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"""length of state dict: {len(state_dict.keys())}""")
    print(f"""length of value function dict: {len(hf_value_function.state_dict().keys())}""")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"""hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin""")
    with open(f"""hub/hopper-medium-v2/unet/hor{hor}/config.json""", "w") as f:
        json.dump(config, f)
def value_function():
    """simple docstring"""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"""length of state dict: {len(state_dict.keys())}""")
    print(f"""length of value function dict: {len(hf_value_function.state_dict().keys())}""")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
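# Sanity-check sketch for a converted checkpoint (assumes the save paths above were
# written; a saved folder can be reloaded with from_pretrained):
# reloaded = UNet1DModel.from_pretrained("hub/hopper-medium-v2/value_function")
# print(sum(p.numel() for p in reloaded.parameters()), "parameters")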
| 87 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    '''simple docstring'''
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
    @property
    def input_shape(self):
        """simple docstring"""
        return (3, 32, 32)

    @property
    def output_shape(self):
        """simple docstring"""
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        """simple docstring"""
        pass

    def test_training(self):
        """simple docstring"""
        pass

    def test_from_pretrained_hub(self):
        """simple docstring"""
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        """simple docstring"""
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
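# Usage sketch mirroring the pretrained test above (downloads the tiny
# 'fusing/vqgan-dummy' checkpoint on first run):
if __name__ == "__main__":
    m = VQModel.from_pretrained("fusing/vqgan-dummy").eval()
    with torch.no_grad():
        y = m(torch.randn(1, m.config.in_channels, m.config.sample_size, m.config.sample_size)).sample
    print(y.shape)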
| 129 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name: str) -> str:
    '''simple docstring'''
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    # NOTE: the destination key paths below follow the reference GroupViT conversion
    # script; they were lost in this copy and are reconstructed under that assumption.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    '''simple docstring'''
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"""Model name {model_name} not supported.""")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)
    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
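# Example invocation (sketch; the checkpoint path is a placeholder):
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path /path/to/groupvit_gcc_yfcc_checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc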
| 129 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs, )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
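# Usage sketch (assumes the public FLAVA checkpoint on the Hub):
# from transformers import FlavaProcessor
# from PIL import Image
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# batch = processor(images=Image.open("cat.png"), text=["a photo of a cat"], return_tensors="pt", padding=True)
# print(batch.keys())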
| 355 |
'''simple docstring'''
class Node:
    def __init__(self, data, previous=None, next_node=None):
        '''simple docstring'''
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        '''simple docstring'''
        return f'{self.data}'

    def get_data(self):
        '''simple docstring'''
        return self.data

    def get_next(self):
        '''simple docstring'''
        return self.next

    def get_previous(self):
        '''simple docstring'''
        return self.previous
class LinkedListIterator:
    def __init__(self, head):
        '''simple docstring'''
        self.current = head

    def __iter__(self):
        '''simple docstring'''
        return self

    def __next__(self):
        '''simple docstring'''
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        '''simple docstring'''
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        '''simple docstring'''
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)
    def __contains__(self, value):
        '''simple docstring'''
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False
def __iter__( self ) -> str:
'''simple docstring'''
return LinkedListIterator(self.head )
    def get_head_data(self):
        '''simple docstring'''
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        '''simple docstring'''
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head(self, node: Node) -> None:
        '''simple docstring'''
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        '''simple docstring'''
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)
    def insert(self, value) -> None:
        '''simple docstring'''
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)
    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        '''simple docstring'''
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        '''simple docstring'''
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position(self, position: int, value) -> None:
        '''simple docstring'''
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)
    def get_node(self, item) -> Node:
        '''simple docstring'''
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")
    def delete_value(self, value):
        '''simple docstring'''
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)
    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        '''simple docstring'''
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self):
        '''simple docstring'''
        return self.head is None
def lowercase_ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
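# Demo (sketch) of the doubly linked list above:
def _linked_list_demo() -> None:
    ll = LinkedList()
    for v in (1, 2, 3):
        ll.insert(v)
    ll.insert_at_position(2, 99)
    print(ll)            # -> 1 99 2 3
    ll.delete_value(99)
    print(2 in ll, ll)   # -> True 1 2 3

# _linked_list_demo()  # uncomment to run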
| 16 | 0 |
'''simple docstring'''
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
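# Sketch of how a pin table like this is typically consumed (the transformers setup
# tooling exposes a similar helper; `deps_list` here is illustrative):
def deps_list(*pkgs: str) -> list:
    return [deps[pkg] for pkg in pkgs]

# install_requires = deps_list("numpy", "requests", "tqdm")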
| 85 |
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue
def top_down_cut_rod(n: int, prices: list) -> int:
    '''simple docstring'''
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    '''simple docstring'''
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )
        max_rev[n] = max_revenue
        return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    '''simple docstring'''
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args(n: int, prices: list):
    '''simple docstring'''
    if n < 0:
        msg = f"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"""Got n = {n} but length of prices = {len(prices)}"""
        )
        raise ValueError(msg)
def main():
    '''simple docstring'''
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
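# Worked example (sketch): with prices [1, 5, 8, 9] for lengths 1..4, the optimum for
# a rod of length 4 is 10 = 5 + 5 (two pieces of length 2).
def _rod_demo() -> None:
    assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10

# _rod_demo()  # uncomment to run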
| 179 | 0 |
def actual_power(a: int, b: int):
    """simple docstring"""
    if b == 0:
        return 1
    # int(b / 2) truncates toward zero, so the recursion also terminates for negative b
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
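    # Quick sanity checks (added sketch): the divide-and-conquer result matches the
    # builtin ** operator for positive and negative exponents.
    assert power(2, 10) == 2**10
    assert abs(power(2, -3) - 0.125) < 1e-12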
| 363 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
_A : List[Any] = 'base_with_context'
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
lowerCamelCase__ : List[str] = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__ : Any = weights[f"layers_{lyr_num}"]
lowerCamelCase__ : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : int = ly_weight['''attention''']
lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
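# Added note: every dense ``kernel`` above is transposed with ``.T`` because
# Flax/T5X stores weights as (in_features, out_features) while torch.nn.Linear
# expects (out_features, in_features); embedding tables are copied untransposed.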
def load_continuous_encoder(weights, model):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
    lowerCamelCase__ : Union[str, Any] = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight['''attention''']
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCamelCase__ : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_decoder(weights, model):
"""simple docstring"""
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
    lowerCamelCase__ : str = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=False )
lowerCamelCase__ : Tuple = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowerCamelCase__ : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Any = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        attention_weights = ly_weight['''self_attention''']
lowerCamelCase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        attention_weights = ly_weight['''MultiHeadDotProductAttention_0''']
lowerCamelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCamelCase__ : Any = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
lowerCamelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCamelCase__ : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCamelCase__ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCamelCase__ : Any = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
lowerCamelCase__ : Tuple = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )

    gin_overrides = [
        '''from __gin__ import dynamic_registration''',
        '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
        '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
        '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
    ]

    gin_file = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )

    scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )

    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )

    notes_encoder = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['''target''']['''decoder'''] , decoder )

    melgan = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
| 265 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property ( property ):
    '''
    Descriptor that mimics @property but caches output in member variable.
    '''

    def __get__( self , obj , objtype=None ):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("""unreadable attribute""" )
        attr = """__cached_""" + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def strtobool ( val ):
    '''Convert a truthy/falsy string to 1 or 0 (mirrors distutils.util.strtobool).'''
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f'''invalid truth value {val!r}''' )
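# Added example: strtobool("YES") == 1 and strtobool("off") == 0, while any other
# string such as "maybe" raises ValueError.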
def is_tensor ( A_ ):
'''simple docstring'''
if is_torch_fx_proxy(A_ ):
return True
if is_torch_available():
import torch
if isinstance(A_, torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(A_, tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(A_, (jnp.ndarray, Tracer) ):
return True
return isinstance(A_, np.ndarray )
def _is_numpy ( A_ ):
'''simple docstring'''
return isinstance(A_, np.ndarray )
def is_numpy_array ( A_ ):
'''simple docstring'''
return _is_numpy(A_ )
def _is_torch ( A_ ):
'''simple docstring'''
import torch
return isinstance(A_, torch.Tensor )
def is_torch_tensor ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch(A_ )
def _is_torch_device ( A_ ):
'''simple docstring'''
import torch
return isinstance(A_, torch.device )
def is_torch_device ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(A_ )
def _is_torch_dtype ( A_ ):
    '''simple docstring'''
    import torch

    if isinstance(A_, str ):
        if hasattr(torch, A_ ):
            A_ = getattr(torch, A_ )
        else:
            return False
    return isinstance(A_, torch.dtype )
def is_torch_dtype ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(A_ )
def _is_tensorflow ( A_ ):
'''simple docstring'''
import tensorflow as tf
return isinstance(A_, tf.Tensor )
def is_tf_tensor ( A_ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(A_ )
def _is_tf_symbolic_tensor ( A_ ):
    '''simple docstring'''
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(A_ )
return type(A_ ) == tf.Tensor
def is_tf_symbolic_tensor ( A_ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(A_ )
def _is_jax ( A_ ):
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(A_, jnp.ndarray )
def is_jax_tensor ( A_ ):
'''simple docstring'''
return False if not is_flax_available() else _is_jax(A_ )
def to_py_obj ( obj ):
    '''Convert TF/PyTorch/JAX tensors, numpy arrays, lists and dicts to plain Python, recursively.'''
    if isinstance(obj, (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj, (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
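# Added example (illustrative): to_py_obj({"a": torch.tensor([1, 2]), "b": (np.int64(3),)})
# returns {"a": [1, 2], "b": [3]} -- framework tensors become plain Python values.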
def to_numpy ( obj ):
    '''Convert TF/PyTorch/JAX tensors, lists and dicts to numpy, recursively.'''
    if isinstance(obj, (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
class ModelOutput ( OrderedDict ):
    '''
    Base class for all model outputs as dataclass. Allows indexing by integer or string (like a dict)
    as well as attribute access, skipping attributes that are None.
    '''

    def __post_init__( self ):
        class_fields = fields(self )

        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )

        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )

        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
    def setdefault( self , *args , **kwargs ):
        raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
    def pop( self , *args , **kwargs ):
        raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
    def update( self , *args , **kwargs ):
        raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
    def __getitem__( self , k ):
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , name , value ):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self , key , value ):
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self ) -> Tuple[Any]:
        '''Convert self to a tuple containing all the attributes/keys that are not None.'''
        return tuple(self[k] for k in self.keys() )
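# Added example (illustrative sketch): a dataclass such as
#     @dataclass
#     class MyOutput(ModelOutput):
#         loss: Optional[float] = None
#         logits: Any = None
# supports out.logits, out["logits"], out[0] and out.to_tuple() interchangeably,
# with None-valued fields dropped from the mapping view.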
class ExplicitEnum ( str , Enum ):
    '''Enum with a more explicit error message for missing values.'''

    @classmethod
    def _missing_( cls , value ):
        raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class PaddingStrategy ( ExplicitEnum ):
    '''Possible values for the ``padding`` argument. Useful for tab-completion in an IDE.'''

    LONGEST = """longest"""
    MAX_LENGTH = """max_length"""
    DO_NOT_PAD = """do_not_pad"""
class TensorType ( ExplicitEnum ):
    '''Possible values for the ``return_tensors`` argument. Useful for tab-completion in an IDE.'''

    PYTORCH = """pt"""
    TENSORFLOW = """tf"""
    NUMPY = """np"""
    JAX = """jax"""
class ContextManagers :
    '''Wrapper around ``contextlib.ExitStack`` that enters a collection of context managers.'''

    def __init__( self , context_managers : List[ContextManager] ):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__( self ):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )

    def __exit__( self , *args , **kwargs ):
        self.stack.__exit__(*args , **kwargs )
def can_return_loss ( model_class ):
    '''Check whether a given model class can return loss (i.e. has a ``return_loss`` argument).'''
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False
def find_labels ( model_class ):
    '''Find the label arguments used by a given model class.'''
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict ( d, parent_key = "", delimiter = "." ):
    '''Flatten a nested dict into a single level, joining keys with ``delimiter``.'''
    def _flatten_dict(d, parent_key="", delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v, MutableMapping ):
                yield from flatten_dict(v, key, delimiter=delimiter ).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter ) )
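# Added example: flatten_dict({"a": {"b": 1, "c": {"d": 2}}}) returns
# {"a.b": 1, "a.c.d": 2}; nested keys are joined with the delimiter.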
@contextmanager
def working_or_temp_dir ( working_dir, use_temp_dir = False ):
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def transpose ( array, axes=None ):
    '''Framework-agnostic version of ``numpy.transpose``.'''
    if is_numpy_array(array ):
        return np.transpose(array, axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.transpose(array, perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array, axes=axes )
    else:
        raise ValueError(f'''Type not supported for transpose: {type(array )}.''' )
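# Added example: transpose dispatches on the tensor type, so
# transpose(np.ones((2, 3))).shape == (3, 2), and the identical call works on
# torch, TensorFlow and JAX arrays.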
def reshape ( array, newshape ):
    '''Framework-agnostic version of ``numpy.reshape``.'''
    if is_numpy_array(array ):
        return np.reshape(array, newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.reshape(array, newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array, newshape )
    else:
        raise ValueError(f'''Type not supported for reshape: {type(array )}.''' )
def squeeze ( array, axis=None ):
    '''Framework-agnostic version of ``numpy.squeeze``.'''
    if is_numpy_array(array ):
        return np.squeeze(array, axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array, axis=axis )
    else:
        raise ValueError(f'''Type not supported for squeeze: {type(array )}.''' )
def expand_dims ( array, axis ):
    '''Framework-agnostic version of ``numpy.expand_dims``.'''
    if is_numpy_array(array ):
        return np.expand_dims(array, axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array, axis=axis )
    else:
        raise ValueError(f'''Type not supported for expand_dims: {type(array )}.''' )
def tensor_size ( array ):
    '''Framework-agnostic number of elements in a tensor.'''
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf

        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f'''Type not supported for tensor_size: {type(array )}.''' )
def add_model_info_to_auto_map ( auto_map, repo_id ):
    '''Prefix every entry of an auto map with the repo id it comes from.'''
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list) ):
            auto_map[key] = [f'''{repo_id}--{v}''' if (v is not None and """--""" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'''{repo_id}--{value}'''

    return auto_map
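# Added example: add_model_info_to_auto_map({"AutoModel": "modeling.MyModel"}, "user/repo")
# returns {"AutoModel": "user/repo--modeling.MyModel"}; values that already
# contain "--" are left untouched.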
def infer_framework ( model_class ):
    '''Infer the framework (pt/tf/flax) from a model class by inspecting its MRO.'''
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("""torch""" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
            return "flax"
    raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 88 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        generator = TextaTextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test( self , generator , _ ):
        outputs = generator("""Something there""" )
        self.assertEqual(outputs , [{"""generated_text""": ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )

        outputs = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] , )

        outputs = generator(
            ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] , )

        with self.assertRaises(ValueError ):
            generator(4 )
@require_torch
    def test_small_model_pt( self ):
        generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" , do_sample=False )
        self.assertEqual(outputs , [{"""generated_text""": """"""}] )

        num_return_sequences = 3
        outputs = generator(
            """Something there""" , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
        target_outputs = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(outputs , target_outputs )

        outputs = generator("""This is a test""" , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ] , )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = """<pad>"""
        outputs = generator(
            ["""This is a test""", """This is a second test"""] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ] , )
@require_tf
    def test_small_model_tf( self ):
        generator = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" , do_sample=False )
        self.assertEqual(outputs , [{"""generated_text""": """"""}] )
| 88 | 1 |
"""simple docstring"""
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in '01' for char in bin_string):
        raise ValueError('Non-binary value was passed to the function')
    if not bin_string:
        raise ValueError('Empty string was passed to the function')
    oct_string = ''
    while len(bin_string) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
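# Added example: bin_to_octal("101010") groups the digits into ["101", "010"]
# and returns "52" (binary 101010 = decimal 42 = octal 52).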
if __name__ == "__main__":
from doctest import testmod
testmod()
| 11 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMvaProcessor ( ProcessorMixin ):
    '''
    Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single processor.
    '''

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text=None , text_pair=None , boxes=None , word_labels=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_token_type_ids=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )

        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )

        # add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''pixel_values'''] = images

        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )

        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )

        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 11 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = '''sshleifer/mar_enro_6_3_student'''


class TestMbartCcaaEnro ( TestCasePlus ):
    def setUp( self ):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=True , )
        self.data_dir = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
    def test_model_download( self ):
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script( self ):
        env_vars_to_replace = {
"$MAX_LEN": 64,
"$BS": 64,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
        bash_script = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys , "argv" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            model = main(args )
# Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , float )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith(".ckpt" )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location="cpu" )
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class TestDistilMarianNoTeacher ( TestCasePlus ):
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script( self ):
        data_dir = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
        env_vars_to_replace = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 1_28,
"$BS": 16,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
        )
        bash_script = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
        bash_script = bash_script.replace("--fp16 " , " " )

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16" , "" )
        epochs = 6
        testargs = (
["distillation.py"]
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
"--gpus=1",
"--learning_rate=1e-3",
F'''--num_train_epochs={epochs}''',
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
        with patch.object(sys , "argv" , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )
# Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , float )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith(".ckpt" )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location="cpu" )
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
 | 87 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig ( PretrainedConfig ):
    model_type = "convbert"

    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=7_68 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 87 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 351 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 47 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
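# Added note: assigning ``emb.weight.data`` ties the new Linear to the shared
# embedding matrix instead of copying it, so the LM head and the embeddings
# keep referring to the same underlying storage.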
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
    parser.add_argument("--mbart_aa", action="store_true", help="whether the model is an mBART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 217 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
lowerCAmelCase_ = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig ( PretrainedConfig ):
    model_type = "tapas"

    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=1_024 ,type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,pad_token_id=0 ,positive_label_weight=10.0 ,num_aggregation_labels=0 ,aggregation_loss_weight=1.0 ,use_answer_as_supervision=None ,answer_loss_importance=1.0 ,use_normalized_answer_loss=False ,huber_loss_delta=None ,temperature=1.0 ,aggregation_temperature=1.0 ,use_gumbel_for_cells=False ,use_gumbel_for_aggregation=False ,average_approximation_function="ratio" ,cell_selection_preference=None ,answer_loss_cutoff=None ,max_num_rows=64 ,max_num_columns=32 ,average_logits_per_cell=False ,select_one_column=True ,allow_empty_column_selection=False ,init_cell_selection_weights_to_zero=False ,reset_position_index_per_cell=True ,disable_per_token_loss=False ,aggregation_labels=None ,no_aggregation_label_index=None ,**kwargs ,):
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels ,dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
| 16 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 264 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure qubit 0 into classical bit 0 on the Aer simulator."""
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
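# Added note: no gates are applied before the measurement, so every shot reads
# |0> and the printed counts should be {'0': 1000}.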
| 264 | 1 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement tanh via the logistic identity tanh(x) = 2*sigmoid(2x) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
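# Added example: tangent_hyperbolic(np.array([-1.0, 0.0, 1.0])) gives roughly
# [-0.7616, 0.0, 0.7616], matching np.tanh.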
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 262 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 265 | 0 |
import os
def solution(filename: str = "matrix.txt") -> int:
    """Find the minimal right/down path sum from the top left to the bottom right
    of the grid in ``filename`` (Project Euler 81)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(',')] for row in data.strip().splitlines()]
    dp = [[0 for cell in row] for row in grid]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
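# Added worked example: for the 2x2 grid [[1, 2], [3, 4]] the dp table becomes
# [[1, 3], [4, 7]], so the minimal right/down path sum is 7 (1 -> 2 -> 4).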
if __name__ == "__main__":
    print(F'{solution() = }')
 | 130 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
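
# Sizing sketch (illustrative, not from the source): if the original entity
# vocab has N entries, "[MASK2]" gets id N and config.entity_vocab_size grows
# to N + 1; the two added word tokens "<ent>"/"<ent2>" grow config.vocab_size by 2.
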
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
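
# Shape sketch (values illustrative): a JSONL entry such as
#   {"id": 7, "entities": [["Japan", "en"]]}
# becomes {"en:Japan": 7}, while special tokens like "[MASK]" keep their bare
# name as the key.
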
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 130 | 1 |
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad so the length is a multiple of 3 (one octal digit per 3 bits).
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
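

def _demo_bin_to_octal() -> None:
    # Worked sketch (helper name is ours): "1010" is left-padded to "001010",
    # split into ["001", "010"], and each triple maps to one octal digit.
    assert bin_to_octal("111") == "7"
    assert bin_to_octal("1010") == "12"
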
if __name__ == "__main__":
from doctest import testmod
testmod()
| 11 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
    def test_backbone(self):
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
pass
@unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 11 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
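
# Memory recurrence sketch (illustrative): Transfo-XL threads `mems` from one
# forward pass into the next, so segment t can attend to the cached hidden
# states of segment t-1:
#   out_1 = model(input_ids_1)
#   out_2 = model({"input_ids": input_ids_2, "mems": out_1.mems})
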
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_keras_fit(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 249 |
'''simple docstring'''
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """Return a peak element of ``lst`` by bisecting on the middle triple."""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
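

def _demo_peak() -> None:
    # Quick sketch (helper name is ours): [1, 3, 4, 5, 2] rises to 5 and then
    # falls, so the middle-triple bisection homes in on 5.
    assert peak([1, 3, 4, 5, 2]) == 5
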
if __name__ == "__main__":
import doctest
doctest.testmod()
| 249 | 1 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    # Sigmoid approximation of GELU: x * sigmoid(1.702 * x).
    return vector * sigmoid(1.702 * vector)
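

def _demo_gelu() -> None:
    # Hedged check (helper name is ours): GELU is ~identity for large positive
    # inputs and ~0 for large negative ones.
    out = gaussian_error_linear_unit(np.array([-10.0, 0.0, 10.0]))
    assert out[1] == 0.0 and out[2] > 9.9 and abs(out[0]) < 1e-6
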
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
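
# Mapping sketch (key names illustrative): the "*" wildcard above is filled in
# with the fairseq layer index before a weight is copied, e.g.
#   "encoder.layers.3.self_attn.k_proj" -> "encoder.layers.3.attention.k_proj"
# (prefixed with "hubert." when converting a fine-tuned checkpoint).
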
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
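
# Illustrative call (argument values are ours): e.g.
#   set_recursively(hf_model, "encoder.layer_norm", value, full_name, "weight")
# walks hf_model.encoder.layer_norm and assigns `value` to its .weight.data.
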
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
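
# CLI sketch (file names illustrative):
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base --not_finetuned
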
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 367 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 129 | 0 |
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
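

def _demo_excel_title() -> None:
    # Worked sketch (helper name is ours): "AB" = 1 * 26^1 + 2 * 26^0 = 28.
    assert excel_title_to_column("AB") == 28
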
if __name__ == "__main__":
from doctest import testmod
testmod()
| 264 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)
    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
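
    # Event-stream sketch (illustrative): with one epoch of two steps and
    # logging_steps=1, the expected list reads
    #   on_init_end, on_train_begin, on_epoch_begin,
    #   on_step_begin, on_step_end, on_log, on_step_begin, on_step_end, on_log,
    #   on_epoch_end, on_log, on_train_end.
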
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 264 | 1 |
import numpy as np
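# 5x5 Polybius square used by the Bifid cipher below; "j" is folded into "i".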
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # Look up the (row, column) position of the letter, 1-indexed.
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        # Map a 1-indexed (row, column) pair back to a letter.
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Write row indices into the first row and column indices into the
        # second, then read the combined sequence off row by row.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        # Undo the encoding: lay the index pairs out flat, fold them back
        # into two rows, then read (row, column) pairs column by column.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
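# Example round trip (hypothetical usage):
#     cipher = BifidCipher()
#     cipher.decode(cipher.encode("testmessage"))  # -> "testmessage"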
| 352 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
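# Lazy import structure: submodule contents are registered by name here and
# only materialized on first attribute access via `_LazyModule` at the bottom.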
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 342 | 0 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
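# A word is a "triangle word" when the sum of its letters' alphabetical values
# (A=1, ..., Z=26) is a triangular number.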
def solution() -> int:
    """simple docstring"""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 130 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
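        # EXPECTED_SCORE is a regression value recorded from a known-good run;
        # the tolerance guards against small numerical drift across versions.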
| 130 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    """simple docstring"""

    model_type = "wav2vec2"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
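        # Product of all convolutional strides: how many raw audio samples are
        # collapsed into a single encoder frame.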
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 289 |
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """simple docstring"""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """simple docstring"""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
def _lowerCamelCase ( self ) -> List[Any]:
super().setUp()
# fmt: off
__lowercase : List[str] = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
__lowercase : str = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
__lowercase : Tuple = {'''unk_token''': '''<unk>'''}
__lowercase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(UpperCamelCase_ ) )
def _lowerCamelCase ( self , **UpperCamelCase_ ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]:
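        # "㔺" is a variant form of "世"; the tokenizer normalizes it, which is
        # why the expected output below differs from the input text.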
__lowercase : Tuple = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
__lowercase : str = '''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict:
__lowercase ,__lowercase : List[str] = self.get_input_output_texts(UpperCamelCase_ )
__lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
__lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
return text, ids
def _lowerCamelCase ( self ) -> str:
pass # TODO add if relevant
def _lowerCamelCase ( self ) -> Tuple:
pass # TODO add if relevant
def _lowerCamelCase ( self ) -> Union[str, Any]:
pass # TODO add if relevant
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Dict = self.get_tokenizer()
# Testing tokenization
__lowercase : Union[str, Any] = '''こんにちは、世界。 こんばんは、㔺界。'''
__lowercase : List[Any] = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
__lowercase : Tuple = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Testing conversion to ids without special tokens
__lowercase : Any = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__lowercase : Optional[int] = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Testing conversion to ids with special tokens
__lowercase : Any = tokens + [tokenizer.unk_token]
__lowercase : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
__lowercase : int = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : List[Any] = self.get_tokenizer()
# Testing tokenization
__lowercase : Optional[Any] = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
__lowercase : Dict = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
__lowercase : Any = tokenizer.encode(UpperCamelCase_ )
__lowercase : Dict = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def _lowerCamelCase ( self ) -> Any:
__lowercase : Optional[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
__lowercase : Tuple = '''こんにちは、世界。'''
__lowercase : Dict = '''こんばんは、㔺界。😀'''
__lowercase : Optional[int] = '''こんにちは、世界。こんばんは、世界。😀'''
__lowercase : Any = tokenizer.encode(prefix_text + input_text )
__lowercase : Union[str, Any] = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
__lowercase : List[str] = tokenizer.encode(UpperCamelCase_ , prefix_text=UpperCamelCase_ )
__lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
__lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
__lowercase : Union[str, Any] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def _lowerCamelCase ( self ) -> Any:
__lowercase : Dict = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
__lowercase : Union[str, Any] = '''こんにちは、世界。'''
__lowercase : Union[str, Any] = '''こんばんは、㔺界。😀'''
__lowercase : List[str] = len(tokenizer.encode(UpperCamelCase_ ) ) - 2
__lowercase : Tuple = len(tokenizer.encode(UpperCamelCase_ ) ) - 2
__lowercase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
__lowercase : Optional[int] = [1] * (len_prefix + len_text + 1) + [0]
__lowercase : Optional[int] = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
__lowercase : Union[str, Any] = tokenizer(prefix_text + input_text ).token_type_ids
__lowercase : Optional[Any] = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
__lowercase : Union[str, Any] = tokenizer(UpperCamelCase_ , prefix_text=UpperCamelCase_ ).token_type_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Optional[int] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
__lowercase : List[Any] = tokenizer.encode('''あンいワ''' )
__lowercase : Any = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
__lowercase : Tuple = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , tokenizer.decode(UpperCamelCase_ ) )
self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , tokenizer.decode(UpperCamelCase_ ) )
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : List[str] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
__lowercase : str = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
__lowercase : List[str] = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ )
__lowercase : List[str] = tokenizer.batch_encode_plus(UpperCamelCase_ , padding=UpperCamelCase_ )
# fmt: off
__lowercase : List[Any] = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
__lowercase : Union[str, Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__lowercase : Dict = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , UpperCamelCase_ )
self.assertListEqual(x_token.token_type_ids , UpperCamelCase_ )
self.assertListEqual(x_token.attention_mask , UpperCamelCase_ )
self.assertListEqual(x_token_a.input_ids , UpperCamelCase_ )
self.assertListEqual(x_token_a.token_type_ids , UpperCamelCase_ )
self.assertListEqual(x_token_a.attention_mask , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> str:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def _lowerCamelCase ( self ) -> Any:
# tokenizer has no padding token
pass
| 249 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__(
        self,
        vocab_size=250002,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        initializer_range=0.02,
        pad_token_id=1,
        layer_norm_eps=1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 249 | 1 |
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)
def disable_propagation() -> None:
    # Assumption: these two parameterless toggles flip log propagation on the
    # library root logger off and on, as in the upstream `datasets` helpers.
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
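# Module-level progress-bar proxy: behaves like `tqdm` when progress bars are
# enabled and as a no-op wrapper otherwise.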
def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
| 17 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    # Turn off gradients for every parameter of the module.
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 17 | 1 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
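# Worked-example inputs for the Banker's algorithm below: the system's total
# resources (claim vector), each process's current allocation, and each
# process's maximum claim.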
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    '''simple docstring'''

    def __init__(
        self,
        claim_vector,
        allocated_resources_table,
        maximum_claim_table,
    ):
        '''simple docstring'''
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        '''simple docstring'''
        # Column-wise sum of the allocation table: resources currently in use.
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        '''simple docstring'''
        # Available = total resources minus everything currently allocated.
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self):
        '''simple docstring'''
        # Need = maximum claim minus current allocation, per process.
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        '''simple docstring'''
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs):
        '''simple docstring'''
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
                print('_' * 50 + '\n')
        # Safety check: repeatedly pick a process whose remaining need fits in
        # the available pool, let it run to completion, and reclaim its resources.
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break

    def __pretty_data(self):
        '''simple docstring'''
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(item) + 1}"""
                + ' '.join(f"""{it:>8}""" for it in item)
                + '\n'
            )
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(item) + 1}"""
                + ' '.join(f"""{it:>8}""" for it in item)
                + '\n'
            )
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector)
        )
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 |
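# Project Euler 40: Champernowne's constant is formed by concatenating the
# positive integers, 0.123456789101112...; multiply the digits at positions
# 1, 10, 100, ..., 1_000_000.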
def solution() -> int:
    '''simple docstring'''
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = ''.join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
| 129 | 0 |
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
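# NOTE: `result` holds sigmoid probabilities, so the equality checks above only
# trigger on fully saturated outputs; thresholding at 0.5 is the conventional
# way to binarize the prediction.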
| 206 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def __lowerCamelCase ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=UpperCAmelCase__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=UpperCAmelCase__ , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __lowerCamelCase (UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE = bool(qa["answers"]["text"] )
return qid_to_has_ans
def __lowerCamelCase (UpperCAmelCase__ : Union[str, Any] ):
def remove_articles(UpperCAmelCase__ : List[str] ):
return ARTICLES_REGEX.sub(" " , UpperCAmelCase__ )
def white_space_fix(UpperCAmelCase__ : Dict ):
return " ".join(text.split() )
def remove_punc(UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(UpperCAmelCase__ : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase__ ) ) ) )
def __lowerCamelCase (UpperCAmelCase__ : List[str] ):
if not s:
return []
return normalize_answer(UpperCAmelCase__ ).split()
def __lowerCamelCase (UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ):
return int(normalize_answer(UpperCAmelCase__ ) == normalize_answer(UpperCAmelCase__ ) )
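# Token-level F1 between a gold answer and a prediction: precision and recall
# over the multiset of normalized tokens, per the SQuAD convention.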
def __lowerCamelCase (UpperCAmelCase__ : Any , UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE = get_tokens(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = get_tokens(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = collections.Counter(UpperCAmelCase__ ) & collections.Counter(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = sum(common.values() )
if len(UpperCAmelCase__ ) == 0 or len(UpperCAmelCase__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall)
return fa
def __lowerCamelCase (UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE = qa["id"]
SCREAMING_SNAKE_CASE = [t for t in qa["answers"]["text"] if normalize_answer(UpperCAmelCase__ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
SCREAMING_SNAKE_CASE = [""]
if qid not in preds:
print(F"Missing prediction for {qid}" )
continue
SCREAMING_SNAKE_CASE = preds[qid]
# Take max over all gold answers
SCREAMING_SNAKE_CASE = max(compute_exact(UpperCAmelCase__ , UpperCAmelCase__ ) for a in gold_answers )
SCREAMING_SNAKE_CASE = max(compute_fa(UpperCAmelCase__ , UpperCAmelCase__ ) for a in gold_answers )
return exact_scores, fa_scores
def __lowerCamelCase (UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ):
SCREAMING_SNAKE_CASE = {}
for qid, s in scores.items():
SCREAMING_SNAKE_CASE = na_probs[qid] > na_prob_thresh
if pred_na:
SCREAMING_SNAKE_CASE = float(not qid_to_has_ans[qid] )
else:
SCREAMING_SNAKE_CASE = s
return new_scores
def __lowerCamelCase (UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict=None ):
if not qid_list:
SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values() ) / total),
("f1", 100.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
SCREAMING_SNAKE_CASE = len(UpperCAmelCase__ )
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def __lowerCamelCase (UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] ):
for k in new_eval:
SCREAMING_SNAKE_CASE = new_eval[k]
def __lowerCamelCase (UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] ):
plt.step(UpperCAmelCase__ , UpperCAmelCase__ , color="b" , alpha=0.2 , where="post" )
plt.fill_between(UpperCAmelCase__ , UpperCAmelCase__ , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(UpperCAmelCase__ )
plt.savefig(UpperCAmelCase__ )
plt.clf()
def __lowerCamelCase (UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : str=None ):
SCREAMING_SNAKE_CASE = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : na_probs[k] )
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 1.0
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = [1.0]
SCREAMING_SNAKE_CASE = [0.0]
SCREAMING_SNAKE_CASE = 0.0
for i, qid in enumerate(UpperCAmelCase__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
SCREAMING_SNAKE_CASE = true_pos / float(i + 1 )
SCREAMING_SNAKE_CASE = true_pos / float(UpperCAmelCase__ )
if i == len(UpperCAmelCase__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(UpperCAmelCase__ )
recalls.append(UpperCAmelCase__ )
if out_image:
plot_pr_curve(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return {"ap": 100.0 * avg_prec}
def __lowerCamelCase (UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] ):
if out_image_dir and not os.path.exists(UpperCAmelCase__ ):
os.makedirs(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , out_image=os.path.join(UpperCAmelCase__ , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , out_image=os.path.join(UpperCAmelCase__ , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
SCREAMING_SNAKE_CASE = {k: float(UpperCAmelCase__ ) for k, v in qid_to_has_ans.items()}
SCREAMING_SNAKE_CASE = make_precision_recall_eval(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , out_image=os.path.join(UpperCAmelCase__ , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(UpperCAmelCase__ , UpperCAmelCase__ , "pr_exact" )
merge_eval(UpperCAmelCase__ , UpperCAmelCase__ , "pr_f1" )
merge_eval(UpperCAmelCase__ , UpperCAmelCase__ , "pr_oracle" )
def __lowerCamelCase (UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int ):
if not qid_list:
return
SCREAMING_SNAKE_CASE = [na_probs[k] for k in qid_list]
SCREAMING_SNAKE_CASE = np.ones_like(UpperCAmelCase__ ) / float(len(UpperCAmelCase__ ) )
plt.hist(UpperCAmelCase__ , weights=UpperCAmelCase__ , bins=2_0 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(F"Histogram of no-answer probability: {name}" )
plt.savefig(os.path.join(UpperCAmelCase__ , F"na_prob_hist_{name}.png" ) )
plt.clf()
def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ):
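    # Sweep candidate no-answer thresholds in increasing order of no-answer
    # probability, keeping the threshold that maximizes the overall score.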
SCREAMING_SNAKE_CASE = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
SCREAMING_SNAKE_CASE = num_no_ans
SCREAMING_SNAKE_CASE = cur_score
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : na_probs[k] )
for i, qid in enumerate(UpperCAmelCase__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
SCREAMING_SNAKE_CASE = scores[qid]
else:
if preds[qid]:
SCREAMING_SNAKE_CASE = -1
else:
SCREAMING_SNAKE_CASE = 0
cur_score += diff
if cur_score > best_score:
SCREAMING_SNAKE_CASE = cur_score
SCREAMING_SNAKE_CASE = na_probs[qid]
return 100.0 * best_score / len(UpperCAmelCase__ ), best_thresh
def __lowerCamelCase (UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = find_best_thresh(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = best_exact
SCREAMING_SNAKE_CASE = exact_thresh
SCREAMING_SNAKE_CASE = best_fa
SCREAMING_SNAKE_CASE = fa_thresh
def __lowerCamelCase ():
with open(OPTS.data_file ) as f:
SCREAMING_SNAKE_CASE = json.load(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = dataset_json["data"]
with open(OPTS.pred_file ) as f:
SCREAMING_SNAKE_CASE = json.load(UpperCAmelCase__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
SCREAMING_SNAKE_CASE = json.load(UpperCAmelCase__ )
else:
SCREAMING_SNAKE_CASE = {k: 0.0 for k in preds}
SCREAMING_SNAKE_CASE = make_qid_to_has_ans(UpperCAmelCase__ ) # maps qid to True/False
SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if v]
SCREAMING_SNAKE_CASE = [k for k, v in qid_to_has_ans.items() if not v]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_raw_scores(UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = apply_no_ans_threshold(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , OPTS.na_prob_thresh )
SCREAMING_SNAKE_CASE = apply_no_ans_threshold(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , OPTS.na_prob_thresh )
SCREAMING_SNAKE_CASE = make_eval_dict(UpperCAmelCase__ , UpperCAmelCase__ )
if has_ans_qids:
SCREAMING_SNAKE_CASE = make_eval_dict(UpperCAmelCase__ , UpperCAmelCase__ , qid_list=UpperCAmelCase__ )
merge_eval(UpperCAmelCase__ , UpperCAmelCase__ , "HasAns" )
if no_ans_qids:
SCREAMING_SNAKE_CASE = make_eval_dict(UpperCAmelCase__ , UpperCAmelCase__ , qid_list=UpperCAmelCase__ )
merge_eval(UpperCAmelCase__ , UpperCAmelCase__ , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , OPTS.out_image_dir )
histogram_na_prob(UpperCAmelCase__ , UpperCAmelCase__ , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(UpperCAmelCase__ , UpperCAmelCase__ , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
else:
print(json.dumps(UpperCAmelCase__ , indent=2 ) )
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 206 | 1 |
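# An automorphic number's square ends in the number itself, e.g. 5**2 = 25 and
# 76**2 = 5776.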
def is_automorphic_number(number: int) -> bool:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        # Compare trailing digits of the number and its square one at a time.
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
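# Example (hypothetical usage): the angle between orthogonal unit vectors.
#     Vector([1, 0]).angle(Vector([0, 1]), deg=True)  # -> 90.0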
def zero_vector(dimension: int) -> Vector:
    """simple docstring"""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """simple docstring"""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """simple docstring"""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """simple docstring"""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
__magic_name__ : Dict = matrix
__magic_name__ : Tuple = w
__magic_name__ : Union[str, Any] = h
def __str__( self ) -> str:
__magic_name__ : Dict = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , lowerCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__magic_name__ : Tuple = []
for i in range(self.__height ):
__magic_name__ : Tuple = [
self.__matrix[i][j] + other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self , lowerCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__magic_name__ : Optional[Any] = []
for i in range(self.__height ):
__magic_name__ : int = [
self.__matrix[i][j] - other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector multiplication
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar multiplication
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Returns the entry at row x, column y."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Assigns a new value to the entry at row x, column y."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Returns the minor of entry (x, y): the determinant of the
        submatrix obtained by deleting row x and column y."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Returns the cofactor (signed minor) of entry (x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Returns the determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Returns a square zero matrix of dimension n x n."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Returns a height x width matrix with random integer entries in [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
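# A minimal usage sketch (hypothetical values, not part of the original module):
#   m = Matrix([[1, 2], [3, 4]], 2, 2)
#   m.determinant()               # -2
#   v = unit_basis_vector(2, 0)   # the vector (1, 0)
#   m * v                         # the first column of m, as a Vector (1, 3)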
| 342 | 0 |
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    """
    Applies Coulomb's law: exactly one of force, charge1, charge2 or distance
    must be 0, and the function solves for that quantity, returning it as a
    single-entry dict of name to value.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
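# Worked example (values chosen for illustration): two 1 C charges 1 m apart,
# solving for the unknown force:
#   coulombs_law(force=0, charge1=1, charge2=1, distance=1)
#   -> {"force": 8.988e9}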
| 368 | from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sorts input_list[low : high + 1] in place by merging its two sorted halves."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort: merges runs of doubling size."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
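# Worked example (illustrative): iter_merge_sort([4, 1, 3, 2])
#   p=2 merges adjacent pairs -> [1, 4, 2, 3]
#   the final merge of both halves -> [1, 2, 3, 4]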
| 20 | 0 |
"""simple docstring"""
import numpy as np
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
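# Example (illustrative):
#   sigmoid(np.array([-1.0, 0.0, 1.0]))
#   -> array([0.26894142, 0.5, 0.73105858]) (approximately)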
| 289 | """simple docstring"""
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Converts a length value between the metric units listed above."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
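# Examples (illustrative):
#   length_conversion(1, "meter", "kilometer") -> 0.001
#   length_conversion(1, "kilometer", "meter") -> 1000.0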
| 289 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('T')
class _SCREAMING_SNAKE_CASE ( Generic[T] ):
def __init__( self , __A ) -> str:
lowerCAmelCase_ :str = data
lowerCAmelCase_ :Optional[int] = None
def __str__( self ) -> str:
return f"""{self.data}"""
class _SCREAMING_SNAKE_CASE ( Generic[T] ):
def __init__( self ) -> None:
lowerCAmelCase_ :Tuple = None
def __iter__( self ) -> Iterator[T]:
lowerCAmelCase_ :Dict = self.top
while node:
yield node.data
lowerCAmelCase_ :Union[str, Any] = node.next
def __str__( self ) -> str:
return "->".join([str(__A ) for item in self] )
def __len__( self ) -> int:
return len(tuple(iter(self ) ) )
def __lowerCAmelCase ( self ) -> bool:
return self.top is None
def __lowerCAmelCase ( self , __A ) -> None:
lowerCAmelCase_ :Tuple = Node(__A )
if not self.is_empty():
lowerCAmelCase_ :Union[str, Any] = self.top
lowerCAmelCase_ :Union[str, Any] = node
def __lowerCAmelCase ( self ) -> T:
if self.is_empty():
raise IndexError("""pop from empty stack""" )
assert isinstance(self.top , __A )
lowerCAmelCase_ :Optional[Any] = self.top
lowerCAmelCase_ :Union[str, Any] = self.top.next
return pop_node.data
def __lowerCAmelCase ( self ) -> T:
if self.is_empty():
raise IndexError("""peek from empty stack""" )
assert self.top is not None
return self.top.data
def __lowerCAmelCase ( self ) -> None:
lowerCAmelCase_ :Tuple = None
if __name__ == "__main__":
from doctest import testmod
testmod()
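# Quick usage sketch (illustrative):
#   stack: LinkedStack[int] = LinkedStack()
#   stack.push(1)
#   stack.push(2)
#   str(stack)   # "2->1"
#   stack.pop()  # 2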
| 352 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # binarize the logits into a 0/1 mask
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
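# Hypothetical invocation sketch (assumes the transformers agents runtime and a
# local image file; the file names are illustrative, not from the original source):
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("photo.png"), label="cat")
#   mask.save("cat_mask.png")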
| 1 | 0 |