def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute the kinetic energy (in joules) of a body: 0.5 * m * v**2."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
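# A minimal usage sketch (hypothetical values, not part of the original module):
# a 10 kg body moving at 5 m/s carries 0.5 * 10 * 5**2 = 125 J.
#
#     assert kinetic_energy(10, 5) == 125.0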
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # Single sequences are wrapped as [CLS] X [SEP]; pairs as [CLS] A [SEP] B [SEP].
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
SCREAMING_SNAKE_CASE : Union[str, Any] = 300 # TEMPERATURE (unit = K)
def lowercase ( _snake_case : float , _snake_case : float , _snake_case : float , ) ->float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
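# A quick sanity check (hypothetical silicon-like values, not from the original
# module): with N_d = N_a = 1e17 and n_i = 1.5e10 (all in cm^-3, so units cancel
# inside the log), kT/q at 300 K is about 0.0259 V, giving roughly 0.81 V.
#
#     print(builtin_voltage(1e17, 1e17, 1.5e10))  # ~0.81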
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, eval_dataloader, model, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                # Keep only the real samples: the last gathered batch is padded with duplicates
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a dummy optimizer is used when the DeepSpeed config already defines one
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler: a dummy scheduler is used when the DeepSpeed config already defines one
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, eval_dataloader, model, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, eval_dataloader, model, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)
if __name__ == "__main__":
main()
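# A hypothetical launch sketch (paths and flag values invented for illustration):
# the script saves a checkpoint after every epoch via `accelerator.save_state`, so
# a run can be split in two and the restored optimizer/scheduler state verified:
#
#     accelerate launch this_script.py --num_epochs 2 --partial_train_epoch 1 --output_dir /tmp/ckpt
#     accelerate launch this_script.py --num_epochs 2 --resume_from_checkpoint /tmp/ckpt/epoch_0 --output_dir /tmp/ckpt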
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
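# A minimal usage sketch (toy ids/boxes borrowed from the integration test above):
# LiLT pairs every token id with a (x0, y0, x1, y1) bounding box on a 0-1000
# normalized page-coordinate scale, so inputs always come as id/box pairs.
#
#     model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
#     out = model(input_ids=torch.tensor([[1, 2]]), bbox=torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]]))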
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
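# The registration pattern exercised above is the public way to plug a custom
# class into the auto-API; a minimal sketch (class names hypothetical):
#
#     AutoConfig.register("my-model", MyConfig)
#     AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
#     fe = AutoFeatureExtractor.from_pretrained("path/containing/my-model-config")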
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Patched module object that re-exposes a module's attributes so they can be overridden."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, e.g. ``patch_submodule(obj, "os.path.join", new_join)``."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
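# A minimal usage sketch (hypothetical module `mymod` that did `from os.path import join`):
# the context manager swaps out both "os.path.join" and any renamed import of it,
# and restores the originals on exit.
#
#     with patch_submodule(mymod, "os.path.join", lambda *parts: "/patched"):
#         assert mymod.join("a", "b") == "/patched"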
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
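# A sketch of how a dataset test might use the mock manager (dataset name and
# URL hypothetical): instead of hitting the network, download_and_extract maps
# each URL onto a file inside the dataset's local dummy_data.zip.
#
#     dl_manager = MockDownloadManager("my_dataset", config=None, version="1.0.0", use_local_dummy_data=True)
#     path = dl_manager.download_and_extract("https://example.com/data.txt")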
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, pruning column and diagonal collisions."""
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # First we check that the column is not already used in the current board
        # (possible_board), because a repeated column would mean a vertical collision.
        # Then we apply the two diagonal formulas:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective collision lists (diagonal_right_collisions,
        # diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
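# For n = 4 the search finds exactly two boards; for example [1, 3, 0, 2] renders as:
#
#     . Q . .
#     . . . Q
#     Q . . .
#     . . Q .
#
# Each queen's row - col values (-1, -2, 2, 1) and row + col values (1, 4, 2, 5)
# are all distinct, which is exactly what the two diagonal-collision lists enforce.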
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed for every node to hold exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )

        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
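# A minimal usage sketch: a root holding 3 coins with two empty children needs
# one move per child, so 2 moves in total.
#
#     assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2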
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        # Pad the bottom and right of the image up to the next multiple of `size`
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 12 | 0 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

# metrics that require optional dependencies or are unsupported on some platforms
REQUIRE_FAIRSEQ = {'''comet'''}
_has_fairseq = importlib.util.find_spec('''fairseq''') is not None

UNSUPPORTED_ON_WINDOWS = {'''code_eval'''}
_on_windows = os.name == '''nt'''

REQUIRE_TRANSFORMERS = {'''bertscore''', '''frugalscore''', '''perplexity'''}
_has_transformers = importlib.util.find_spec('''transformers''') is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
@slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
@contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield
@contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
yield
    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load the model from a checkpoint
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 108 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left:right + 1] using divide and conquer."""
    if len(nums) == 0:
        raise ValueError("""find_max() arg is an empty sequence""")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("""list index out of range""")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 12 | 0 |
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("""surface_area_cube() only accepts non-negative values""")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("""surface_area_cuboid() only accepts non-negative values""")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("""surface_area_sphere() only accepts non-negative values""")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("""surface_area_hemisphere() only accepts non-negative values""")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("""surface_area_cone() only accepts non-negative values""")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            """surface_area_conical_frustum() only accepts non-negative values""")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("""surface_area_cylinder() only accepts non-negative values""")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("""surface_area_torus() only accepts non-negative values""")
    if torus_radius < tube_radius:
        raise ValueError(
            """surface_area_torus() does not support spindle or self intersecting tori""")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("""area_rectangle() only accepts non-negative values""")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("""area_square() only accepts non-negative values""")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("""area_triangle() only accepts non-negative values""")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("""area_triangle_three_sides() only accepts non-negative values""")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("""Given three sides do not form a triangle""")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("""area_parallelogram() only accepts non-negative values""")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("""area_trapezium() only accepts non-negative values""")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("""area_circle() only accepts non-negative values""")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("""area_ellipse() only accepts non-negative values""")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("""area_rhombus() only accepts non-negative values""")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            """area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""")
    elif length < 0:
        raise ValueError(
            """area_reg_polygon() only accepts non-negative values as \
length of a side""")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"""Rectangle: {area_rectangle(1_0, 2_0) = }""")
print(f"""Square: {area_square(1_0) = }""")
print(f"""Triangle: {area_triangle(1_0, 1_0) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }""")
print(f"""Parallelogram: {area_parallelogram(1_0, 2_0) = }""")
print(f"""Rhombus: {area_rhombus(1_0, 2_0) = }""")
print(f"""Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }""")
print(f"""Circle: {area_circle(2_0) = }""")
print(f"""Ellipse: {area_ellipse(1_0, 2_0) = }""")
print("\nSurface Areas of various geometric shapes: \n")
print(f"""Cube: {surface_area_cube(2_0) = }""")
print(f"""Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }""")
print(f"""Sphere: {surface_area_sphere(2_0) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(2_0) = }""")
print(f"""Cone: {surface_area_cone(1_0, 2_0) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }""")
print(f"""Cylinder: {surface_area_cylinder(1_0, 2_0) = }""")
print(f"""Torus: {surface_area_torus(2_0, 1_0) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 1_0) = }""")
print(f"""Square: {area_reg_polygon(4, 1_0) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 1_0) = }""")
| 109 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        framework_determined = FeaturesManager.determine_framework(self.test_model, framework)
        self.assertEqual(framework_determined, framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework_determined = FeaturesManager.determine_framework(local_pt_ckpt, framework)
            self.assertEqual(framework_determined, framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework_determined = FeaturesManager.determine_framework(local_tf_ckpt, framework)
            self.assertEqual(framework_determined, framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
        self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
        self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_torch_available""", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available), patch(
            """transformers.onnx.features.is_torch_available""", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available), patch(
            """transformers.onnx.features.is_torch_available""", mock_torch_available):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 12 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load('''facebookresearch/dino:main''', model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1E-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1E-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 110 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 12 | 0 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    # checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # checks the order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input('''Enter integers separated by spaces: ''')
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 143 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
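# make the pipelines fully deterministic so the expected slices below are reproducible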
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="""gelu""", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("""RGB""").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("""RGB""").resize((64, 64))
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": init_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""")
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""")
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench.npy""")
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9E-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""")
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""")
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
            """/yellow_cat_sitting_on_a_park_bench_fp16.npy""")
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="""np""", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-inpaint/init_image.png""")
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""")
        model_id = """stabilityai/stable-diffusion-2-inpainting"""
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="""scheduler""")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = """Face of a yellow cat, high resolution, sitting on a park bench"""
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="""np""", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 12 | 0 |
'''simple docstring'''
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers with the bead (gravity) sort algorithm."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('Sequence must be list of non-negative integers')
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # let the extra beads on the upper rod fall down to the lower rod
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 161 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
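# the MLuke tokenizer depends on sentencepiece, so it is only registered when that dependency is available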
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
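    # mirror the lazy import structure above for static type checkers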
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(F"""Could not make batched video from {videos}""")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            # resize the shorter side to `shortest_edge`, keeping the aspect ratio
            output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            # the optional offset shifts the pixel values so the rescaled output is centred around zero
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        if offset and not do_rescale:
            raise ValueError('For offset, do_rescale must also be set to True.')
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        # fall back to the processor's defaults for any argument the caller leaves unset
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        if not valid_images(videos):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 284 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

# map ONNX Runtime tensor element types to their NumPy equivalents
ORT_TO_NP_TYPE = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""")
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""", None)
        self.latest_model_name = kwargs.get("""latest_model_name""", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        # onnxruntime expects plain numpy inputs; passing None requests all graph outputs
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""")
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(F'Provided path ({save_directory}) should be a directory, not a file')
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
@classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str, None]] = None, revision: Optional[Union[str, None]] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options)
            kwargs["""model_save_dir"""] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["""model_save_dir"""] = Path(model_cache_path).parent
            kwargs["""latest_model_name"""] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
@classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs):
        revision = None
        if len(str(model_id).split("""@""")) == 2:
            # a model id of the form "repo@revision" pins a specific revision
            model_id, revision = model_id.split("""@""")
        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
| 12 | 0 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
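    # the tester below exercises both the base model and the causal-LM head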
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        # run with and without an explicit attention mask
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
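# --- Editor addition: hedged usage sketch, not part of the original test file.
# It exercises the same KV-cache property that
# create_and_check_decoder_model_past_large_inputs verifies above, on a tiny
# randomly initialised model; all config values below are illustrative.
def _kv_cache_smoke_check():
    config = GPTNeoXJapaneseConfig(
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_multiple_size=4
    )
    model = GPTNeoXJapaneseForCausalLM(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    with torch.no_grad():
        full = model(input_ids, use_cache=True)
        # feeding only the last token plus the cache must agree with a full re-run
        step = model(input_ids[:, -1:], past_key_values=full.past_key_values)
    assert step.logits.shape == (1, 1, config.vocab_size)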
| 13 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
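# --- Editor addition: standalone sketch of the lazy-import pattern used above.
# `_DemoLazyModule` is a simplified stand-in, not the transformers
# implementation; attributes resolve to real imports only on first access.
import importlib
import types


class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._demo_import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._demo_import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(submodule), attr)
                setattr(self, attr, value)  # cache so __getattr__ only runs once per name
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")


# e.g. _DemoLazyModule("demo", {"json": ["dumps"]}).dumps({"ok": True}) == '{"ok": true}'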
| 12 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    """Configuration class for the Speech2Text2 decoder."""

    model_type = 'speech_to_text_2'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 177 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f'Saving model to {output_model_file}')
                torch.save(state_dict, output_model_file)
                logger.info(f'Model saved to {output_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f'Saving model to {output_model_file}')
            torch.save(state_dict, output_model_file)
            logger.info(f'Model saved to {output_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f'{MODEL_NAME}_{model_index}')
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f'Saving model to {ckpt_dir}')
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f'Model saved to {ckpt_dir}')


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f'Loading model from {input_model_file}')
            state_dict = torch.load(input_model_file)
            logger.info(f'Model loaded from {input_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
                if model_index == 0
                else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f'Loading model from {input_model_file}')
            state_dict = torch.load(input_model_file)
            logger.info(f'Model loaded from {input_model_file}')
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f'{MODEL_NAME}_{model_index}')
                if f'{MODEL_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading model from {ckpt_dir}')
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f'Model loaded from {ckpt_dir}')
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f'Saving Optimizer state to {output_optimizer_file}')
                torch.save(optim_state, output_optimizer_file)
                logger.info(f'Optimizer state saved in {output_optimizer_file}')
        else:
            ckpt_dir = os.path.join(output_dir, f'{OPTIMIZER_NAME}_{optimizer_index}')
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f'Saving Optimizer state to {ckpt_dir}')
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f'Optimizer state saved in {ckpt_dir}')


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # the check below should work but currently does not (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f'Loading Optimizer state from {input_optimizer_file}')
            optim_state = torch.load(input_optimizer_file)
            logger.info(f'Optimizer state loaded from {input_optimizer_file}')
        else:
            ckpt_dir = (
                os.path.join(input_dir, f'{OPTIMIZER_NAME}_{optimizer_index}')
                if f'{OPTIMIZER_NAME}' not in input_dir
                else input_dir
            )
            logger.info(f'Loading Optimizer from {ckpt_dir}')
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f'Optimizer loaded from {ckpt_dir}')
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
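# --- Editor addition: hedged sketch of how a training script typically drives
# the helpers above. `accelerator.state.fsdp_plugin` carries the state-dict
# settings (this mirrors what Accelerator.save_state/load_state do); the
# checkpoint path is a placeholder.
def _fsdp_checkpoint_roundtrip(accelerator, model, optimizer, ckpt_dir="fsdp_ckpt"):
    fsdp_plugin = accelerator.state.fsdp_plugin
    save_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
    save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)
    load_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
    load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)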
| 12 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure['modeling_data2vec_text'] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure['modeling_data2vec_vision'] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
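# --- Editor addition: TYPE_CHECKING is always False at runtime, so the
# imports in the branch above are only seen by static type checkers while
# _LazyModule serves the same names lazily to the interpreter.
from typing import TYPE_CHECKING as _TYPE_CHECKING_DEMO

assert _TYPE_CHECKING_DEMO is False  # type checkers treat it as True; the interpreter never does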
| 298 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Tuple = ShapEImgaImgPipeline
UpperCAmelCase__ : Optional[Any] = ['image']
UpperCAmelCase__ : int = ['image']
UpperCAmelCase__ : Any = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
UpperCAmelCase__ : int = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
@property
def lowerCAmelCase__ ( self: int ):
torch.manual_seed(0 )
__lowerCamelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCamelCase = CLIPVisionModel(UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def lowerCAmelCase__ ( self: Tuple ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowerCamelCase = PriorTransformer(**UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self: List[Any] ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowerCamelCase = ShapERenderer(**UpperCamelCase_ )
return model
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.dummy_prior
__lowerCamelCase = self.dummy_image_encoder
__lowerCamelCase = self.dummy_image_processor
__lowerCamelCase = self.dummy_renderer
__lowerCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , )
__lowerCamelCase = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = """cpu"""
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
__lowerCamelCase = output.images[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: List[str] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = torch_device == """cpu"""
__lowerCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
__lowerCamelCase = batch_size * [inputs[key]]
__lowerCamelCase = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
__lowerCamelCase = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
__lowerCamelCase = pipe(
UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
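# --- Editor addition: hedged sketch of what the assert_mean_pixel_difference
# helper used above boils down to (the diffusers helper also converts through
# PIL first); names and threshold are illustrative.
def _demo_mean_pixel_difference(image, expected_image, expected_max_diff=10.0):
    diff = np.abs(image.astype(np.float32) - expected_image.astype(np.float32)).mean()
    assert diff < expected_max_diff, f"images deviate by {diff} pixels on average"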
| 12 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=False , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=3_3 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : List[Any] = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : Tuple = is_training
_lowerCAmelCase : Optional[int] = use_input_mask
_lowerCAmelCase : Union[str, Any] = use_token_type_ids
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Optional[int] = type_vocab_size
_lowerCAmelCase : Dict = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : List[str] = num_labels
_lowerCAmelCase : Any = num_choices
_lowerCAmelCase : Optional[int] = scope
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Tuple = None
if self.use_input_mask:
_lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : int = None
_lowerCAmelCase : int = None
_lowerCAmelCase : str = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Dict = EsmModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
_lowerCAmelCase : Optional[int] = model(UpperCamelCase_ )
_lowerCAmelCase : int = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Dict = EsmForMaskedLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.num_labels
_lowerCAmelCase : Union[str, Any] = EsmForTokenClassification(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
_lowerCAmelCase : int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class a_ (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : Tuple = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
__lowerCAmelCase : Optional[int] = ()
__lowerCAmelCase : str = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : Any = True
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = EsmModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : List[Any] = type
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
@slow
def __UpperCamelCase ( self ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : List[str] = EsmModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()[0]
_lowerCAmelCase : int = EsmEmbeddings(config=UpperCamelCase_ )
_lowerCAmelCase : Optional[Any] = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] )
_lowerCAmelCase : Optional[int] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
_lowerCAmelCase : Union[str, Any] = create_position_ids_from_input_ids(UpperCamelCase_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCamelCase_ , UpperCamelCase_ ) ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()[0]
_lowerCAmelCase : List[Any] = EsmEmbeddings(config=UpperCamelCase_ )
_lowerCAmelCase : str = torch.empty(2 , 4 , 3_0 )
_lowerCAmelCase : int = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
_lowerCAmelCase : Optional[int] = torch.as_tensor([expected_single_positions, expected_single_positions] )
_lowerCAmelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(UpperCamelCase_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCamelCase_ , UpperCamelCase_ ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
@require_torch
class a_ (__lowerCamelCase ):
@slow
def __UpperCamelCase ( self ):
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_lowerCAmelCase : int = torch.tensor([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase : Union[str, Any] = model(UpperCamelCase_ )[0]
_lowerCAmelCase : List[str] = 3_3
_lowerCAmelCase : Tuple = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase_ )
_lowerCAmelCase : str = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def __UpperCamelCase ( self ):
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
_lowerCAmelCase : List[Any] = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
_lowerCAmelCase : Dict = model(UpperCamelCase_ )[0]
# compare the actual values for a slice.
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
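# --- Editor addition: hedged sketch mirroring the padding-aware position ids
# tested above (simplified: the real create_position_ids_from_input_ids also
# takes a past_key_values_length offset).
def _demo_position_ids(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    # non-pad tokens count up from padding_idx + 1; pad tokens stay at padding_idx
    return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx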
| 309 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: VQModel , ):
super().__init__()
self.register_modules(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
__lowerCamelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: int ):
if latents is None:
__lowerCamelCase = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__lowerCamelCase = latents.to(UpperCamelCase_ )
__lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
__lowerCamelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int]=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCamelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowerCamelCase, __lowerCamelCase = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
__lowerCamelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self: int ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self: Tuple , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self._execution_device
__lowerCamelCase = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
__lowerCamelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__lowerCamelCase = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = hint.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
__lowerCamelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
__lowerCamelCase = self.scheduler.timesteps
__lowerCamelCase = self.movq.config.latent_channels
__lowerCamelCase, __lowerCamelCase = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
__lowerCamelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase = {"""image_embeds""": image_embeds, """hint""": hint}
__lowerCamelCase = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCamelCase, __lowerCamelCase = noise_pred.chunk(2 )
__lowerCamelCase, __lowerCamelCase = variance_pred.chunk(2 )
__lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
__lowerCamelCase = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__lowerCamelCase = image * 0.5 + 0.5
__lowerCamelCase = image.clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
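# --- Editor addition: hedged usage sketch for downscale_height_and_width
# above; sizes that are not multiples of scale_factor**2 are rounded up.
if __name__ == "__main__":
    print(downscale_height_and_width(512, 512))  # -> (64, 64)
    print(downscale_height_and_width(513, 512))  # -> (72, 64): 513 rounds up to the next block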
| 12 | 0 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Dict = logging.get_logger(__name__)
__lowerCAmelCase : str = '▁'
__lowerCAmelCase : List[str] = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}
__lowerCAmelCase : List[str] = {
'sentencepiece_model_file': 'sentencepiece.bpe.model',
'vocab_file': 'vocab.txt',
}
__lowerCAmelCase : Any = {
'vocab_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
},
'sentencepiece_model_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
},
}
__lowerCAmelCase : Optional[int] = {
'ernie-m-base': 514,
'ernie-m-large': 514,
}
__lowerCAmelCase : Union[str, Any] = {
'ernie-m-base': {'do_lower_case': False},
'ernie-m-large': {'do_lower_case': False},
}
class UpperCAmelCase_ ( __lowerCamelCase ):
'''simple docstring'''
a__ = ["input_ids"]
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_INIT_CONFIGURATION
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = RESOURCE_FILES_NAMES
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : str=False , UpperCamelCase__ : Union[str, Any]="utf8" , UpperCamelCase__ : int="[UNK]" , UpperCamelCase__ : Union[str, Any]="[SEP]" , UpperCamelCase__ : Optional[Any]="[PAD]" , UpperCamelCase__ : Dict="[CLS]" , UpperCamelCase__ : Any="[MASK]" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Union[str, Any] , ) -> Dict:
"""simple docstring"""
__magic_name__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , vocab_file=UpperCamelCase_ , encoding=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__magic_name__ = do_lower_case
__magic_name__ = sentencepiece_model_ckpt
__magic_name__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase_ )
        # mimic the behaviour of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
__magic_name__ = self.load_vocab(filepath=UpperCamelCase_ )
else:
__magic_name__ = {self.sp_model.id_to_piece(UpperCamelCase_ ): id for id in range(self.sp_model.get_piece_size() )}
__magic_name__ = {v: k for k, v in self.vocab.items()}
def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if text is None:
return None
__magic_name__ = self.tokenize(UpperCamelCase_ )
__magic_name__ , __magic_name__ = """""", []
for i, ch in enumerate(UpperCamelCase_ ):
if ch in self.SP_CHAR_MAPPING:
__magic_name__ = self.SP_CHAR_MAPPING.get(UpperCamelCase_ )
else:
__magic_name__ = unicodedata.normalize("""NFKC""" , UpperCamelCase_ )
if self.is_whitespace(UpperCamelCase_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(UpperCamelCase_ ) )
__magic_name__ , __magic_name__ , __magic_name__ = normalized_text, [], 0
if self.do_lower_case:
__magic_name__ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__magic_name__ = token[1:]
__magic_name__ = text[offset:].index(UpperCamelCase_ ) + offset
__magic_name__ = start + len(UpperCamelCase_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__magic_name__ = end
return token_mapping
@property
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return len(self.vocab )
def _lowercase ( self : List[str] ) -> List[str]:
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : List[Any] ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.__dict__.copy()
__magic_name__ = None
return state
def __setstate__( self : str , UpperCamelCase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__magic_name__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__magic_name__ = {}
__magic_name__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _lowercase ( self : List[Any] , UpperCamelCase__ : List[Any] ) -> List[Any]:
"""simple docstring"""
return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase_ , UpperCamelCase_ ) for c in text) )
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any=False , UpperCamelCase__ : Union[str, Any]=64 , UpperCamelCase__ : Union[str, Any]=0.1 ) -> List[str]:
"""simple docstring"""
if self.sp_model_kwargs.get("""enable_sampling""" ) is True:
__magic_name__ = True
if self.sp_model_kwargs.get("""alpha""" ) is not None:
__magic_name__ = self.sp_model_kwargs.get("""alpha""" )
if self.sp_model_kwargs.get("""nbest_size""" ) is not None:
__magic_name__ = self.sp_model_kwargs.get("""nbest_size""" )
if not enable_sampling:
__magic_name__ = self.sp_model.EncodeAsPieces(UpperCamelCase_ )
else:
__magic_name__ = self.sp_model.SampleEncodeAsPieces(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__magic_name__ = []
for pi, piece in enumerate(UpperCamelCase_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(UpperCamelCase_ ) and pi != 0:
new_pieces.append(UpperCamelCase_ )
continue
else:
continue
__magic_name__ = 0
for i, chunk in enumerate(UpperCamelCase_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(UpperCamelCase_ ) or self.is_punct(UpperCamelCase_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(UpperCamelCase_ )
__magic_name__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__magic_name__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__magic_name__ = i
if len(UpperCamelCase_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _lowercase ( self : List[Any] , UpperCamelCase__ : Any ) -> Any:
"""simple docstring"""
__magic_name__ = """""".join(UpperCamelCase_ ).replace(UpperCamelCase_ , """ """ ).strip()
return out_string
def _lowercase ( self : List[Any] , UpperCamelCase__ : Tuple ) -> Any:
"""simple docstring"""
__magic_name__ = self.convert_ids_to_tokens(UpperCamelCase_ )
__magic_name__ = """""".join(UpperCamelCase_ ).replace(UpperCamelCase_ , """ """ ).strip()
return out_string
def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self.vocab.get(UpperCamelCase_ , self.vocab.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
return self.reverse_vocab.get(UpperCamelCase_ , self.unk_token )
def _lowercase ( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : str=None ) -> Any:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ = [self.cls_token_id]
__magic_name__ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _lowercase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : List[str]=None ) -> str:
"""simple docstring"""
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _lowercase ( self : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[str]=False ) -> Tuple:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> Union[str, Any]:
"""simple docstring"""
if token_ids_a is None:
# [CLS] X [SEP]
return (len(UpperCamelCase_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(UpperCamelCase_ ) + 1) + [1] * (len(UpperCamelCase_ ) + 3)
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _lowercase ( self : List[Any] , UpperCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _lowercase ( self : Dict , UpperCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _lowercase ( self : Dict , UpperCamelCase__ : Union[str, Any] ) -> Dict:
"""simple docstring"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(UpperCamelCase_ ) == 1:
__magic_name__ = unicodedata.category(UpperCamelCase_ )
if cat == "Zs":
return True
return False
def _lowercase ( self : Optional[int] , UpperCamelCase__ : int ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = {}
with io.open(UpperCamelCase_ , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(UpperCamelCase_ ):
__magic_name__ = line.rstrip("""\n""" )
__magic_name__ = int(UpperCamelCase_ )
return token_to_idx
def _lowercase ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> int:
"""simple docstring"""
__magic_name__ = 0
if os.path.isdir(UpperCamelCase_ ):
__magic_name__ = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
__magic_name__ = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
__magic_name__ = token_index
writer.write(token + """\n""" )
index += 1
__magic_name__ = os.path.join(UpperCamelCase_ , """sentencepiece.bpe.model""" )
with open(UpperCamelCase_ , """wb""" ) as fi:
__magic_name__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (vocab_file,)
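# A minimal, dependency-free sketch (not part of the row above) of the
# pair-encoding layout the methods implement: [CLS] A [SEP] [SEP] B [SEP],
# with segment id 0 covering [CLS]+A and segment id 1 covering the rest.
# The token ids below are made-up placeholders, not real vocabulary entries.
CLS, SEP = 101, 102

def build_pair(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP] + [SEP] + ids_b + [SEP]

def segment_ids(ids_a, ids_b=None):
    if ids_b is None:
        return [0] * (len(ids_a) + 2)
    return [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 3)

assert len(build_pair([1, 2], [3])) == len(segment_ids([1, 2], [3]))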
| 88 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase__( unittest.TestCase):
def __init__( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: int=2 , UpperCamelCase_: Optional[Any]=56 , UpperCamelCase_: Tuple=True , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: str=True , UpperCamelCase_: str=99 , UpperCamelCase_: Tuple=32 , UpperCamelCase_: int=2 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Optional[int]="gelu_new" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: List[Any]=5_12 , UpperCamelCase_: Union[str, Any]=16 , UpperCamelCase_: int=2 , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Union[str, Any]="block_sparse" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Any=2 , UpperCamelCase_: int=3 , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_choices
__lowerCamelCase = rescale_embeddings
__lowerCamelCase = attention_type
__lowerCamelCase = use_bias
__lowerCamelCase = block_size
__lowerCamelCase = num_random_blocks
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs
__lowerCamelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: Optional[Any] ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[str] ):
super().test_hidden_states_output()
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = model_class(UpperCamelCase_ )
@jax.jit
def model_jitted(UpperCamelCase_: Tuple , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] ):
return model(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , **UpperCamelCase_ )
with self.subTest("""JIT Enabled""" ):
__lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Dict=1E-5 , UpperCamelCase_: List[str]="outputs" , UpperCamelCase_: List[str]=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
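# A self-contained toy version (assumed pattern, not the test above) of the
# JIT-consistency check: trace a function with jax.jit, rerun it eagerly
# under jax.disable_jit(), and compare output shapes.
import jax
import jax.numpy as jnp

@jax.jit
def toy_model(x):
    return (jnp.tanh(x) * 2.0,)

x = jnp.arange(6.0).reshape(2, 3)
jitted = toy_model(x)
with jax.disable_jit():
    eager = toy_model(x)
assert all(j.shape == e.shape for j, e in zip(jitted, eager))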
| 12 | 0 |
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials`
    independent trials that each succeed with probability `prob`."""
    if successes > trials:
        raise ValueError('''successes must be lower than or equal to trials''')
    if trials < 0 or successes < 0:
        raise ValueError('''the function is defined for non-negative integers''')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('''the function is defined for non-negative integers''')
    if not 0 < prob < 1:
        raise ValueError('''prob has to be in the range (0, 1)''')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
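# Worked check of the printed case: C(4, 2) = 6 and
# 0.75**2 * 0.25**2 = 0.03515625, so the probability is 6 * 0.03515625 = 0.2109375.
assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12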
| 211 |
def odd_even_transposition(arr: list) -> list:
    """Sort `arr` in place with odd-even transposition (brick) sort."""
    arr_size = len(arr)
    for phase in range(arr_size):
        # Even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 12 | 0 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    # Element-wise logistic function.
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    # SiLU (swish) activation: x * sigmoid(x).
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
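# Quick sanity check: sigmoid(0) == 0.5 makes SiLU(0) == 0; large positive
# inputs pass through almost unchanged, large negative ones are squashed to ~0.
x = np.array([-10.0, 0.0, 10.0])
out = sigmoid_linear_unit(x)
assert abs(out[1]) < 1e-12 and abs(out[2] - 10.0) < 1e-3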
| 163 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Any , UpperCamelCase_: str , UpperCamelCase_: Dict ):
__lowerCamelCase = question_encoder
__lowerCamelCase = generator
__lowerCamelCase = self.question_encoder
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[Any] ):
if os.path.isfile(UpperCamelCase_ ):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
__lowerCamelCase = os.path.join(UpperCamelCase_ , """question_encoder_tokenizer""" )
__lowerCamelCase = os.path.join(UpperCamelCase_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(UpperCamelCase_ )
self.generator.save_pretrained(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: List[Any] , UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
__lowerCamelCase = kwargs.pop("""config""" , UpperCamelCase_ )
if config is None:
__lowerCamelCase = RagConfig.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(
UpperCamelCase_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
__lowerCamelCase = AutoTokenizer.from_pretrained(
UpperCamelCase_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=UpperCamelCase_ , generator=UpperCamelCase_ )
def __call__( self: Tuple , *UpperCamelCase_: int , **UpperCamelCase_: int ):
return self.current_tokenizer(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , *UpperCamelCase_: List[Any] , **UpperCamelCase_: List[Any] ):
return self.generator.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , *UpperCamelCase_: str , **UpperCamelCase_: Union[str, Any] ):
return self.generator.decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.question_encoder
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.generator
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "longest" , UpperCamelCase_: str = None , UpperCamelCase_: bool = True , **UpperCamelCase_: int , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , UpperCamelCase_ , )
if max_length is None:
__lowerCamelCase = self.current_tokenizer.model_max_length
__lowerCamelCase = self(
UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__lowerCamelCase = self.current_tokenizer.model_max_length
__lowerCamelCase = self(
text_target=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = labels["""input_ids"""]
return model_inputs
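# Hypothetical usage sketch of the recommended replacement for
# prepare_seq2seq_batch (checkpoint name and texts are assumptions, kept
# as comments rather than executable code):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   batch = tokenizer(
#       ["Who wrote Hamlet?"],
#       text_target=["William Shakespeare"],
#       padding="longest",
#       return_tensors="pt",
#   )
#   labels = batch["labels"]  # target ids, as built by the method above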
| 12 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowerCAmelCase__ : Any = logging.get_logger(__name__)
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
snake_case__ = 'AutoTokenizer'
snake_case__ = ['tokenizer']
snake_case__ = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self : str ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Optional[Any]=None ):
super().__init__(UpperCamelCase_ )
UpperCAmelCase__ = speaker_embeddings
@classmethod
def __lowerCAmelCase ( cls : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Tuple="speaker_embeddings_path.json" ,**lowerCamelCase__ : Dict ):
if speaker_embeddings_dict_path is not None:
UpperCAmelCase__ = get_file_from_repo(
UpperCamelCase_ ,UpperCamelCase_ ,subfolder=kwargs.pop('subfolder' ,UpperCamelCase_ ) ,cache_dir=kwargs.pop('cache_dir' ,UpperCamelCase_ ) ,force_download=kwargs.pop('force_download' ,UpperCamelCase_ ) ,proxies=kwargs.pop('proxies' ,UpperCamelCase_ ) ,resume_download=kwargs.pop('resume_download' ,UpperCamelCase_ ) ,local_files_only=kwargs.pop('local_files_only' ,UpperCamelCase_ ) ,use_auth_token=kwargs.pop('use_auth_token' ,UpperCamelCase_ ) ,revision=kwargs.pop('revision' ,UpperCamelCase_ ) ,)
if speaker_embeddings_path is None:
logger.warning(
f'''`{os.path.join(UpperCamelCase_ ,UpperCamelCase_ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
UpperCAmelCase__ = None
else:
with open(UpperCamelCase_ ) as speaker_embeddings_json:
UpperCAmelCase__ = json.load(UpperCamelCase_ )
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = AutoTokenizer.from_pretrained(UpperCamelCase_ ,**UpperCamelCase_ )
return cls(tokenizer=UpperCamelCase_ ,speaker_embeddings=UpperCamelCase_ )
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : int ,lowerCamelCase__ : int="speaker_embeddings_path.json" ,lowerCamelCase__ : Optional[Any]="speaker_embeddings" ,lowerCamelCase__ : bool = False ,**lowerCamelCase__ : Union[str, Any] ,):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(UpperCamelCase_ ,UpperCamelCase_ ,'v2' ) ,exist_ok=UpperCamelCase_ )
UpperCAmelCase__ = {}
UpperCAmelCase__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
UpperCAmelCase__ = self._load_voice_preset(UpperCamelCase_ )
UpperCAmelCase__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] ,UpperCamelCase_ ,f'''{prompt_key}_{key}''' ) ,voice_preset[key] ,allow_pickle=UpperCamelCase_ ,)
UpperCAmelCase__ = os.path.join(UpperCamelCase_ ,f'''{prompt_key}_{key}.npy''' )
UpperCAmelCase__ = tmp_dict
with open(os.path.join(UpperCamelCase_ ,UpperCamelCase_ ) ,'w' ) as fp:
json.dump(UpperCamelCase_ ,UpperCamelCase_ )
super().save_pretrained(UpperCamelCase_ ,UpperCamelCase_ ,**UpperCamelCase_ )
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : str = None ,**lowerCamelCase__ : List[Any] ):
UpperCAmelCase__ = self.speaker_embeddings[voice_preset]
UpperCAmelCase__ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
UpperCAmelCase__ = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,UpperCamelCase_ ) ,cache_dir=kwargs.pop('cache_dir' ,UpperCamelCase_ ) ,force_download=kwargs.pop('force_download' ,UpperCamelCase_ ) ,proxies=kwargs.pop('proxies' ,UpperCamelCase_ ) ,resume_download=kwargs.pop('resume_download' ,UpperCamelCase_ ) ,local_files_only=kwargs.pop('local_files_only' ,UpperCamelCase_ ) ,use_auth_token=kwargs.pop('use_auth_token' ,UpperCamelCase_ ) ,revision=kwargs.pop('revision' ,UpperCamelCase_ ) ,)
if path is None:
raise ValueError(
f'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.''' )
UpperCAmelCase__ = np.load(UpperCamelCase_ )
return voice_preset_dict
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Optional[dict] = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self : Any ,lowerCamelCase__ : Optional[int]=None ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : List[Any]="pt" ,lowerCamelCase__ : str=256 ,lowerCamelCase__ : List[Any]=False ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Optional[Any]=False ,**lowerCamelCase__ : Dict ,):
if voice_preset is not None and not isinstance(UpperCamelCase_ ,UpperCamelCase_ ):
if (
isinstance(UpperCamelCase_ ,UpperCamelCase_ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
UpperCAmelCase__ = self._load_voice_preset(UpperCamelCase_ )
else:
if isinstance(UpperCamelCase_ ,UpperCamelCase_ ) and not voice_preset.endswith('.npz' ):
UpperCAmelCase__ = voice_preset + '.npz'
UpperCAmelCase__ = np.load(UpperCamelCase_ )
if voice_preset is not None:
self._validate_voice_preset_dict(UpperCamelCase_ ,**UpperCamelCase_ )
UpperCAmelCase__ = BatchFeature(data=UpperCamelCase_ ,tensor_type=UpperCamelCase_ )
UpperCAmelCase__ = self.tokenizer(
UpperCamelCase_ ,return_tensors=UpperCamelCase_ ,padding='max_length' ,max_length=UpperCamelCase_ ,return_attention_mask=UpperCamelCase_ ,return_token_type_ids=UpperCamelCase_ ,add_special_tokens=UpperCamelCase_ ,**UpperCamelCase_ ,)
if voice_preset is not None:
UpperCAmelCase__ = voice_preset
return encoded_text
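# A made-up voice preset that would satisfy the rank checks in
# _validate_voice_preset_dict above (all names and shapes here are
# illustrative only, not real Bark prompts):
import numpy as np

dummy_preset = {
    "semantic_prompt": np.zeros(16, dtype=np.int64),      # rank 1
    "coarse_prompt": np.zeros((2, 16), dtype=np.int64),   # rank 2
    "fine_prompt": np.zeros((8, 16), dtype=np.int64),     # rank 2
}
assert all(v.ndim == r for v, r in zip(dummy_preset.values(), (1, 2, 2)))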
| 98 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCAmelCase_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
UpperCAmelCase_ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
UpperCAmelCase_ = 0
for log in Path().glob('*.log'):
UpperCAmelCase_ = 0
with open(log, 'r') as f:
for line in f:
UpperCAmelCase_ = json.loads(line)
if line.get('nodeid', '') != "":
UpperCAmelCase_ = line['nodeid']
if line.get('duration', None) is not None:
UpperCAmelCase_ = f"""{line["duration"]:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCAmelCase_ = []
log.unlink()
UpperCAmelCase_ = ''
UpperCAmelCase_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
for test in failed_tests:
UpperCAmelCase_ = test[0].split('::')
UpperCAmelCase_ = data[0].split('/')[-1]
if data[0] not in filesafailed:
UpperCAmelCase_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCAmelCase_ = [test[0] for test in failed_table]
UpperCAmelCase_ = list(set(files))
# Count number of instances in failed_tests
UpperCAmelCase_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCAmelCase_ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
UpperCAmelCase_ = 'Too many failed tests, please see the full report in the Action results.'
UpperCAmelCase_ = len(err) + 10
UpperCAmelCase_ = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
UpperCAmelCase_ = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
UpperCAmelCase_ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCAmelCase_ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCAmelCase_ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
UpperCAmelCase_ = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase_ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase_ = row[0]
else:
UpperCAmelCase_ = ''
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
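# Rough illustration (assumed, not captured from a real run) of what the
# bare pipe-delimited TableFormat above renders, e.g. for
#   tabulate([["tests/test_foo.py", 2]], headers=["Test Location", "Num Failed"],
#            tablefmt=hf_table_format, stralign="right")
# each row becomes right-aligned cells joined by "|", ready to drop into a
# Slack ``` code block.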
| 12 | 0 |
from collections import namedtuple

from_to = namedtuple('from_to', 'from_ to')

# `from_` converts the unit into cubic metres; `to` converts cubic metres
# back into the unit.
METRIC_CONVERSION = {
    'cubicmeter': from_to(1, 1),
    'litre': from_to(0.001, 1000),
    'kilolitre': from_to(1, 1),
    'gallon': from_to(0.00454, 264.172),
    'cubicyard': from_to(0.76455, 1.30795),
    'cubicfoot': from_to(0.028, 35.3147),
    'cup': from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ', '.join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ', '.join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
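# Worked check: 2 litres -> gallons is 2 * 0.001 (litre to m^3)
# * 264.172 (m^3 to gallon) = 0.528344 gallons.
assert abs(volume_conversion(2, "litre", "gallon") - 0.528344) < 1e-9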
| 143 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
@register_to_config
def __init__( self: Optional[Any] , UpperCamelCase_: bool , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None ):
super().__init__()
__lowerCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase = torch.zeros(UpperCamelCase_ , UpperCamelCase_ )
else:
__lowerCamelCase = None
__lowerCamelCase = torch.nn.Parameter(UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : VQModel
UpperCAmelCase__ : CLIPTextModel
UpperCAmelCase__ : CLIPTokenizer
    UpperCAmelCase__ : Transformer2DModel
UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__ : VQDiffusionScheduler
    def __init__( self: str , UpperCamelCase_: VQModel , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: Transformer2DModel , UpperCamelCase_: VQDiffusionScheduler , UpperCamelCase_: LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=UpperCamelCase_ , transformer=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 )
else:
__lowerCamelCase = [""""""] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: Tuple , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 5.0 , UpperCamelCase_: float = 1.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = len(UpperCamelCase_ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase_ )}.' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ )
__lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ )
__lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ):
__lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ )
__lowerCamelCase = torch.exp(UpperCamelCase_ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
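# A standalone sketch of the same truncation idea used in truncate() above:
# per row, keep the smallest set of classes whose probability mass reaches
# truncation_rate (always at least the top class) and set the rest to -inf.
import torch

def truncate_log_probs(log_p: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    sorted_log_p, indices = torch.sort(log_p, 1, descending=True)
    keep = sorted_log_p.exp().cumsum(dim=1) < truncation_rate
    # Shift right so the most probable class always survives.
    keep = torch.cat((torch.ones_like(keep[:, :1]), keep[:, :-1]), dim=1)
    keep = keep.gather(1, indices.argsort(1))
    return log_p.masked_fill(~keep, -torch.inf)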
| 12 | 0 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCamelCase__ ( __lowerCamelCase):
def __init__( self :Dict , *_A :Any , _A :int=None , _A :List[str]=None , **_A :str ) -> int:
'''simple docstring'''
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
__A = eval_examples
__A = post_process_function
def lowercase_ ( self :Any , _A :Optional[Dataset] = None , _A :List[str]=None , _A :Optional[List[str]] = None , _A :str = "eval" , **_A :int , ) -> int:
'''simple docstring'''
__A = gen_kwargs.copy()
__A = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
__A = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
__A = gen_kwargs
__A = self.eval_dataset if eval_dataset is None else eval_dataset
__A = self.get_eval_dataloader(UpperCamelCase_ )
__A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__A = self.compute_metrics
__A = None
__A = time.time()
__A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__A = eval_loop(
UpperCamelCase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
__A = compute_metrics
__A = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__A = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__A = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
__A = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
else:
__A = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__A = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def lowercase_ ( self :List[str] , _A :Dict , _A :Optional[Any] , _A :List[Any]=None , _A :str = "test" , **_A :Dict ) -> List[str]:
'''simple docstring'''
__A = gen_kwargs.copy()
__A = self.get_test_dataloader(UpperCamelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
__A = self.compute_metrics
__A = None
__A = time.time()
__A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__A = eval_loop(
UpperCamelCase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
__A = compute_metrics
__A = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__A = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , 'predict' )
__A = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
__A = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
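# A minimal sketch of the metric-prefixing loop used in both methods above:
# any key that does not already start with "<prefix>_" is re-keyed with it.
def prefix_metrics(metrics: dict, prefix: str) -> dict:
    out = dict(metrics)
    for key in list(out.keys()):
        if not key.startswith(f"{prefix}_"):
            out[f"{prefix}_{key}"] = out.pop(key)
    return out

assert prefix_metrics({"loss": 0.5, "eval_f1": 0.9}, "eval") == {"eval_loss": 0.5, "eval_f1": 0.9}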
| 161 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = DistilBertTokenizer
UpperCAmelCase__ : Dict = DistilBertTokenizerFast
UpperCAmelCase__ : Tuple = True
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
__lowerCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 12 | 0 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_snake_case : Any = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class _UpperCAmelCase :
"""simple docstring"""
a_ = PegasusConfig
a_ = {}
a_ = 'gelu'
def __init__( self : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any]=1_3 , lowerCAmelCase_ : List[Any]=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : List[Any]=9_9 , lowerCAmelCase_ : Union[str, Any]=3_2 , lowerCAmelCase_ : str=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : List[str]=3_7 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Tuple=2_0 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[int]=0 , ) -> Any:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = bos_token_id
def lowercase ( self : Any ) -> str:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__lowerCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowerCAmelCase = prepare_pegasus_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict ) -> int:
__lowerCAmelCase = 2_0
__lowerCAmelCase = model_class_name(UpperCamelCase_ )
__lowerCAmelCase = model.encode(inputs_dict['input_ids'] )
__lowerCAmelCase , __lowerCAmelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
__lowerCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
__lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , )
__lowerCAmelCase = model.decode(UpperCamelCase_ , UpperCamelCase_ )
__lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def lowercase ( self : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any ) -> Optional[int]:
__lowerCAmelCase = 2_0
__lowerCAmelCase = model_class_name(UpperCamelCase_ )
__lowerCAmelCase = model.encode(inputs_dict['input_ids'] )
__lowerCAmelCase , __lowerCAmelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__lowerCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ )
__lowerCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCAmelCase = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
__lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , )
__lowerCAmelCase = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ )
__lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Tuple, lowerCAmelCase_ : str, lowerCAmelCase_ : Tuple=None, lowerCAmelCase_ : Tuple=None, ):
if attention_mask is None:
        __lowerCAmelCase = np.not_equal(A__, config.pad_token_id ).astype(np.int8 )
if decoder_attention_mask is None:
__lowerCAmelCase = np.concatenate(
[
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.int8 ),
], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class _UpperCAmelCase ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a_ = True
a_ = False
a_ = False
a_ = False
def lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCAmelCase = FlaxPegasusModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=UpperCamelCase_ )
def lowercase ( self : Union[str, Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowercase ( self : Optional[int] ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowercase ( self : List[Any] ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowercase ( self : Dict ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
__lowerCAmelCase = model_class(UpperCamelCase_ )
@jax.jit
def encode_jitted(lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Dict ):
return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
with self.subTest('JIT Enabled' ):
__lowerCAmelCase = encode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCAmelCase = encode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase ( self : str ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCAmelCase = model_class(UpperCamelCase_ )
__lowerCAmelCase = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
__lowerCAmelCase = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str ):
return model.decode(
decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , )
with self.subTest('JIT Enabled' ):
__lowerCAmelCase = decode_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCAmelCase = decode_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase ( self : Tuple ) -> str:
for model_class_name in self.all_model_classes:
__lowerCAmelCase = model_class_name.from_pretrained('google/pegasus-large' , from_pt=UpperCamelCase_ )
__lowerCAmelCase = np.ones((1, 1) )
__lowerCAmelCase = model(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
def lowercase ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
__lowerCAmelCase = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
__lowerCAmelCase = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ',
]
__lowerCAmelCase = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
__lowerCAmelCase = tokenizer(UpperCamelCase_ , return_tensors='np' , truncation=UpperCamelCase_ , max_length=5_1_2 , padding=UpperCamelCase_ )
__lowerCAmelCase = model.generate(**UpperCamelCase_ , num_beams=2 ).sequences
__lowerCAmelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
assert tgt_text == decoded
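# A minimal numpy rendition of the mask-building in
# prepare_pegasus_inputs_dict above: 1 where a real token sits, 0 at padding.
import numpy as np

pad_token_id = 0
input_ids = np.array([[5, 7, 2, 0, 0]])
attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
assert attention_mask.tolist() == [[1, 1, 1, 0, 0]]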
| 284 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(A__ )
__lowerCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any] , A__ : Tuple , A__ : Optional[Any] ):
'''simple docstring'''
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCamelCase, __lowerCamelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = args.model_name_or_path
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
    # We also need to keep track of the starting epoch so files are named properly
__lowerCamelCase = 0
__lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
__lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowerCamelCase = int(A__ ) + 1
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowerCamelCase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowerCamelCase = f'epoch_{epoch}'
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowerCamelCase = accuracy
__lowerCamelCase = lr_scheduler.get_lr()[0]
__lowerCamelCase = optimizer.param_groups[0]["""lr"""]
__lowerCamelCase = epoch
__lowerCamelCase = overall_step
accelerator.print(f'epoch {epoch}:' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(A__ , A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
    __lowerCamelCase = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
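    # Standalone sketch (invented data, not part of the training script) of the
    # gather-then-truncate pattern in evaluation_loop above: sharded dataloaders
    # pad the last batch with duplicated samples so every process contributes
    # equally, and the gathered stream must be cut back to the true dataset size.
    dataset_len = 10
    gathered_batches = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 8, 9]]  # last batch padded
    samples_seen = 0
    kept = []
    for step, batch in enumerate(gathered_batches):
        if step == len(gathered_batches) - 1:
            batch = batch[: dataset_len - samples_seen]  # drop the duplicated tail
        else:
            samples_seen += len(batch)
        kept.extend(batch)
    assert kept == list(range(dataset_len))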
| 12 | 0 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    # Difference between the hypothesis and the known output for one example.
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )
def _hypothesis_value(data_input_tuple):
    # h(x) = theta_0 + theta_1 * x_1 + ... + theta_n * x_n
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
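    # Hand-computed sanity check of the hypothesis form (values derived from
    # the initial parameter vector [2, 4, 1, 5] above, not from the source):
    # for input (5, 2, 3), h = 2 + 4*5 + 1*2 + 5*3 = 39.
    initial_parameters = [2, 4, 1, 5]
    assert initial_parameters[0] + sum(
        x * theta for x, theta in zip((5, 2, 3), initial_parameters[1:])
    ) == 39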
| 13 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase_ = get_tests_dir('fixtures')
UpperCAmelCase_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
UpperCAmelCase_ = get_tests_dir('fixtures/dummy-config.json')
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = 0
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
__lowerCamelCase = WavaVecaFeatureExtractor(**UpperCamelCase_ )
# save in new folder
model_config.save_pretrained(UpperCamelCase_ )
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
# make sure private variable is not incorrectly saved
__lowerCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
with self.assertRaisesRegex(
UpperCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCAmelCase__ ( self: Tuple ):
with self.assertRaisesRegex(
UpperCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , revision="""aaaaaa""" )
def lowerCAmelCase__ ( self: Optional[Any] ):
with self.assertRaisesRegex(
UpperCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase__ ( self: Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def lowerCAmelCase__ ( self: Any ):
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCamelCase = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase__ ( self: Dict ):
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = True
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# If remote code is not set, the default is to use local
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(UpperCamelCase_ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 12 | 0 |
"""simple docstring"""
import base64
def base85_encode(string: str) -> bytes:
    # The digit-obfuscated names in the source ('baseaa.baaencode') fit any of
    # base64's b16/b32/b64/b85 codecs; b85encode is assumed here for illustration.
    return base64.b85encode(string.encode('''utf-8''' ) )
def base85_decode(encoded_bytes: bytes) -> str:
    return base64.b85decode(encoded_bytes ).decode('''utf-8''' )
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
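    # Roundtrip property check (holds for every codec in the base64 module):
    assert base85_decode(base85_encode(test ) ) == test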
| 177 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase_ = get_logger(__name__)
class lowerCamelCase__:
UpperCAmelCase__ : List[Any] = 'dummy_data'
UpperCAmelCase__ : str = 'datasets'
UpperCAmelCase__ : Tuple = False
def __init__( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: Union[Version, str] , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[List[Callable]] = None , ):
__lowerCamelCase = 0
__lowerCamelCase = dataset_name
__lowerCamelCase = cache_dir
__lowerCamelCase = use_local_dummy_data
__lowerCamelCase = config
# download_callbacks take a single url as input
__lowerCamelCase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowerCamelCase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowerCamelCase = str(UpperCamelCase_ )
# to be downloaded
__lowerCamelCase = None
__lowerCamelCase = None
@property
def lowerCAmelCase__ ( self: List[Any] ):
if self._dummy_file is None:
__lowerCamelCase = self.download_dummy_data()
return self._dummy_file
@property
def lowerCAmelCase__ ( self: str ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowerCamelCase = cached_path(
UpperCamelCase_ , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase_ , force_extract=UpperCamelCase_ )
return os.path.join(UpperCamelCase_ , self.dummy_file_name )
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowerCAmelCase__ ( self: Tuple ):
if self._bucket_url is None:
__lowerCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def lowerCAmelCase__ ( self: str ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict , *UpperCamelCase_: str ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowerCamelCase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowerCamelCase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return self.create_dummy_data_dict(UpperCamelCase_ , UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self.create_dummy_data_list(UpperCamelCase_ , UpperCamelCase_ )
else:
return self.create_dummy_data_single(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] , *UpperCamelCase_: str ):
return self.download_and_extract(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: str ):
return self.download_and_extract(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int , *UpperCamelCase_: List[str] , **UpperCamelCase_: str ):
return path
def lowerCAmelCase__ ( self: Dict ):
return {}
def lowerCAmelCase__ ( self: str , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
for single_url in single_urls:
download_callback(UpperCamelCase_ )
else:
__lowerCamelCase = single_urls
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = [os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) ) for x in single_urls]
else:
__lowerCamelCase = single_urls
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) )
__lowerCamelCase = value
# make sure that values are unique
if all(isinstance(UpperCamelCase_ , UpperCamelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowerCamelCase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowerCamelCase = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , UpperCamelCase_ ) ) for url in data_url )
__lowerCamelCase = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowerCamelCase = [data_url[0]] * len(UpperCamelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(UpperCamelCase_ )
return dummy_data_list
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] ):
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(UpperCamelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
def lowerCAmelCase__ ( self: List[Any] ):
pass
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Dict ):
def _iter_archive_members(UpperCamelCase_: Any ):
# this preserves the order of the members inside the ZIP archive
__lowerCamelCase = Path(self.dummy_file ).parent
__lowerCamelCase = path.relative_to(UpperCamelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowerCamelCase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(UpperCamelCase_ )
__lowerCamelCase = Path(UpperCamelCase_ )
__lowerCamelCase = _iter_archive_members(UpperCamelCase_ ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(UpperCamelCase_ ).as_posix(), file_path.open("""rb""" )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = [paths]
for path in paths:
if os.path.isfile(UpperCamelCase_ ):
if os.path.basename(UpperCamelCase_ ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCamelCase_ ):
if os.path.basename(UpperCamelCase_ ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(UpperCamelCase_ ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(UpperCamelCase_ , UpperCamelCase_ )
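# Minimal standalone sketch (not part of the manager above) of the URL-to-local
# filename convention that create_dummy_data_dict / create_dummy_data_list rely
# on: each URL is reduced to its quote_plus-encoded last path component.
# The URL below is invented for illustration.
def _dummy_local_name(url: str) -> str:
    return urllib.parse.quote_plus(url.split("""/""" )[-1] )
assert _dummy_local_name("""https://example.com/data/train.csv?rev=2""" ) == """train.csv%3Frev%3D2"""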
| 12 | 0 |
'''simple docstring'''
_lowerCAmelCase = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
_lowerCAmelCase = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
_lowerCAmelCase = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_lowerCAmelCase = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
_lowerCAmelCase = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
_lowerCAmelCase = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
_lowerCAmelCase = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
_lowerCAmelCase = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 298 |
from __future__ import annotations
def depth_first_search(possible_board: list[int] , diagonal_right_collisions: list[int] , diagonal_left_collisions: list[int] , boards: list[list[str]] , n: int , ) -> None:
    '''simple docstring'''
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution(n: int ) -> None:
    '''simple docstring'''
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
# Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print("""""" )
    print(len(boards ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
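    # Quick check of the two diagonal formulas used above (positions invented):
    # queens share a 45º diagonal iff row - col matches, and a 135º diagonal
    # iff row + col matches.
    assert 0 - 1 == 2 - 3  # (0, 1) and (2, 3) collide on a 45º diagonal
    assert 1 + 2 == 2 + 1  # (1, 2) and (2, 1) collide on a 135º diagonal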
| 12 | 0 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 309 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coins(root: TreeNode | None ) -> int:
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("""The number of nodes should equal the number of coins""" )
    # Main calculation
    def get_distrib(node: TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
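    # Worked example (constructed here for illustration): a root holding 3
    # coins with two empty children needs exactly 2 moves, one coin per child.
    assert distribute_coins(TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) ) ) == 2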
| 12 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : int , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 88 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = ['pixel_values']
def __init__( self: Any , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: bool = True , UpperCamelCase_: int = 8 , **UpperCamelCase_: Tuple , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_pad
__lowerCamelCase = pad_size
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple ):
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None ):
__lowerCamelCase, __lowerCamelCase = get_image_size(UpperCamelCase_ )
__lowerCamelCase = (old_height // size + 1) * size - old_height
__lowerCamelCase = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: Any , ):
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_pad if do_pad is not None else self.do_pad
__lowerCamelCase = pad_size if pad_size is not None else self.pad_size
__lowerCamelCase = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_pad:
__lowerCamelCase = [self.pad(UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__lowerCamelCase = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
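# Numeric check of the padding formula in the pad helper above (sizes are
# invented): with size=8, a 17-pixel edge gains (17 // 8 + 1) * 8 - 17 = 7
# pixels. Note the formula always pads to the *next* multiple, so an already
# aligned 16-pixel edge still gains a full block of 8.
assert (17 // 8 + 1) * 8 - 17 == 7
assert (16 // 8 + 1) * 8 - 16 == 8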
| 12 | 0 |
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post a message to a Slack channel via an incoming-webhook URL."""
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url , json={'''text''': message_body} , headers=headers)
    if response.status_code != 200:
        error_message = (
            '''Request to slack returned an error '''
            F'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 211 |
from __future__ import annotations
def find_max(nums: list[int | float] , left: int , right: int ) -> int | float:
    '''simple docstring'''
    if len(nums ) == 0:
        raise ValueError("""find_max() arg is an empty sequence""" )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError("""list index out of range""" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
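    # Example call (values invented): search the whole list via its index bounds.
    assert find_max([2, 7, 1, 9, 4] , 0 , 4 ) == 9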
| 12 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nezha'] = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
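# Standalone sketch of the lazy-import idea behind _LazyModule above. This is
# an illustration under assumed behaviour, not the real transformers class:
# replacing the module in sys.modules defers each submodule import until one
# of its symbols is first accessed.
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(F"{self.__name__}.{submodule}" )
                return getattr(module, attr)
        raise AttributeError(F"module {self.__name__!r} has no attribute {attr!r}" )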
| 163 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCamelCase__( __lowerCamelCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = SMALL_MODEL_IDENTIFIER
__lowerCamelCase = """pt"""
__lowerCamelCase = """tf"""
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase_ )
model_tf.save_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = """mock_framework"""
# Framework provided - return whatever the user provides
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
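        # TensorFlow not in environment -> use PyTorch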
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_tf )
# Both in environment -> use PyTorch
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# Both not in environment -> raise error
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
| 12 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = BlipImageProcessor()
UpperCAmelCase__ = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
UpperCAmelCase__ = BlipaProcessor(UpperCamelCase_ ,UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Dict ,**lowerCamelCase__ : Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**UpperCamelCase_ ).tokenizer
def __lowerCAmelCase ( self : List[Any] ,**lowerCamelCase__ : Optional[int] ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**UpperCamelCase_ ).image_processor
def __lowerCAmelCase ( self : Any ):
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
UpperCAmelCase__ = [Image.fromarray(np.moveaxis(UpperCamelCase_ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Any ):
UpperCAmelCase__ = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
UpperCAmelCase__ = self.get_image_processor(do_normalize=UpperCamelCase_ ,padding_value=1.0 )
UpperCAmelCase__ = BlipaProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=UpperCamelCase_ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCamelCase_ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = BlipaProcessor(tokenizer=UpperCamelCase_ ,image_processor=UpperCamelCase_ )
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = image_processor(UpperCamelCase_ ,return_tensors='np' )
UpperCAmelCase__ = processor(images=UpperCamelCase_ ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = BlipaProcessor(tokenizer=UpperCamelCase_ ,image_processor=UpperCamelCase_ )
UpperCAmelCase__ = 'lower newer'
UpperCAmelCase__ = processor(text=UpperCamelCase_ )
UpperCAmelCase__ = tokenizer(UpperCamelCase_ ,return_token_type_ids=UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = BlipaProcessor(tokenizer=UpperCamelCase_ ,image_processor=UpperCamelCase_ )
UpperCAmelCase__ = 'lower newer'
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = processor(text=UpperCamelCase_ ,images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def __lowerCAmelCase ( self : Optional[Any] ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = BlipaProcessor(tokenizer=UpperCamelCase_ ,image_processor=UpperCamelCase_ )
UpperCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase__ = processor.batch_decode(UpperCamelCase_ )
UpperCAmelCase__ = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ ,UpperCamelCase_ )
def __lowerCAmelCase ( self : List[str] ):
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = BlipaProcessor(tokenizer=UpperCamelCase_ ,image_processor=UpperCamelCase_ )
UpperCAmelCase__ = 'lower newer'
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = processor(text=UpperCamelCase_ ,images=UpperCamelCase_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'input_ids', 'attention_mask'] )
| 98 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]] ) -> list[list[int]]:
    '''simple docstring'''
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or (not alive and neighbour_count == 3)
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
return next_generation
def generate_images(cells: list[list[int]] , frames: int ) -> list[Image.Image]:
    '''simple docstring'''
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
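    # The blinker defined above oscillates with period 2: one generation turns
    # the vertical bar horizontal and a second generation restores it.
    assert new_generation(BLINKER ) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
    assert new_generation(new_generation(BLINKER ) ) == BLINKER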
| 12 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __snake_case ( __lowerCamelCase ):
__lowerCamelCase = DistilBertTokenizer
__lowerCamelCase = DistilBertTokenizerFast
__lowerCamelCase = True
@slow
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : int = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
snake_case__ : int = tokenizer.encode('sequence builders' , add_special_tokens=UpperCamelCase_ )
snake_case__ : int = tokenizer.encode('multi-sequence build' , add_special_tokens=UpperCamelCase_ )
snake_case__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
snake_case__ : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 143 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = StableDiffusionInpaintPipeline
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : int = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase__ : Union[str, Any] = frozenset([])
def lowerCAmelCase__ ( self: str ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
__lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInpaintPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: int ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self: int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 12 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a__ : Dict = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 161 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]=1_3 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Optional[Any]=9_9 , lowerCAmelCase_ : Optional[int]=3_2 , lowerCAmelCase_ : Optional[Any]=5 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : Optional[int]=3_7 , lowerCAmelCase_ : List[str]="gelu" , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=5_1_2 , lowerCAmelCase_ : Union[str, Any]=1_6 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Optional[Any]=0.02 , lowerCAmelCase_ : Dict=4 , ) -> Any:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_attention_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_choices
def lowercase ( self : int ) -> Any:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_attention_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCamelCase_ , )
return config, input_ids, attention_mask
def lowercase ( self : int ) -> List[str]:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowercase ( self : List[str] ) -> Any:
__lowerCAmelCase = FlaxDistilBertModelTester(self )
@slow
def lowercase ( self : Any ) -> List[str]:
for model_class_name in self.all_model_classes:
__lowerCAmelCase = model_class_name.from_pretrained('distilbert-base-uncased' )
__lowerCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : List[Any] ) -> List[Any]:
__lowerCAmelCase = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
__lowerCAmelCase = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
__lowerCAmelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCAmelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
__lowerCAmelCase = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , UpperCamelCase_ )
__lowerCAmelCase = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCamelCase_ , atol=1e-4 ) )
| 284 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class lowerCamelCase__:
def __init__( self: str , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: str ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
__lowerCamelCase = model
__lowerCamelCase = kwargs.get("""model_save_dir""" , UpperCamelCase_ )
__lowerCamelCase = kwargs.get("""latest_model_name""" , UpperCamelCase_ )
def __call__( self: Dict , **UpperCamelCase_: Any ):
__lowerCamelCase = {k: np.array(UpperCamelCase_ ) for k, v in kwargs.items()}
return self.model.run(UpperCamelCase_ , UpperCamelCase_ )
@staticmethod
def lowerCAmelCase__ ( UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Tuple=None , UpperCamelCase_: Tuple=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
__lowerCamelCase = """CPUExecutionProvider"""
return ort.InferenceSession(UpperCamelCase_ , providers=[provider] , sess_options=UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: Optional[int] ):
__lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__lowerCamelCase = self.model_save_dir.joinpath(self.latest_model_name )
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__lowerCamelCase = self.model_save_dir.joinpath(UpperCamelCase_ )
if src_path.exists():
__lowerCamelCase = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[str, os.PathLike] , **UpperCamelCase_: Optional[Any] , ):
if os.path.isfile(UpperCamelCase_ ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
# saving model weights/files
self._save_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: Optional[Union[bool, str, None]] = None , UpperCamelCase_: Optional[Union[str, None]] = None , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional["ort.SessionOptions"] = None , **UpperCamelCase_: int , ):
__lowerCamelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(UpperCamelCase_ ):
__lowerCamelCase = OnnxRuntimeModel.load_model(
os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
__lowerCamelCase = Path(UpperCamelCase_ )
# load model from hub
else:
# download model
__lowerCamelCase = hf_hub_download(
repo_id=UpperCamelCase_ , filename=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , )
__lowerCamelCase = Path(UpperCamelCase_ ).parent
__lowerCamelCase = Path(UpperCamelCase_ ).name
__lowerCamelCase = OnnxRuntimeModel.load_model(UpperCamelCase_ , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
return cls(model=UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: Optional[int] , UpperCamelCase_: Union[str, Path] , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[str] = None , **UpperCamelCase_: int , ):
__lowerCamelCase = None
if len(str(UpperCamelCase_ ).split("""@""" ) ) == 2:
__lowerCamelCase, __lowerCamelCase = model_id.split("""@""" )
return cls._from_pretrained(
model_id=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , **UpperCamelCase_ , )
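# --- Editor's usage sketch (illustrative paths and inputs, not part of the original file) ---
# The class above wraps an onnxruntime InferenceSession behind a diffusers-style API:
#
#   model = OnnxRuntimeModel.from_pretrained("path/to/onnx_dir", file_name="model.onnx")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
#
# __call__ converts every keyword argument to a numpy array before running the session,
# so the keyword names must match the ONNX graph's input names.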
| 12 | 0 |
def A_ ( a: int , b: int ):
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary) , len(b_binary) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len) , b_binary.zfill(max_len) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
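    # Editor's usage sketch (values chosen for illustration, not from the source):
    # A_ performs a bitwise AND over the zero-padded binary strings of its inputs.
    assert A_(25, 32) == "0b000000"  # 0b011001 AND 0b100000 share no set bits
    assert A_(37, 50) == "0b100000"  # 0b100101 AND 0b110010 agree only at bit 5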
| 13 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
__A = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[int]:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
return max(metric_fn(A__ , A__ ) for gt in ground_truths )
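# Editor's note (illustrative example, not from the source): with metric_fn=exact_match_score,
# prediction="Paris" and ground_truths=["paris", "Lyon"], the max over the references is a
# full match (1), since SQuAD-style exact match normalizes case, articles and punctuation.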
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
lowercase__: Dict = [line.strip() for line in open(A__ , '''r''' ).readlines()]
lowercase__: int = []
if args.gold_data_mode == "qa":
lowercase__: Any = pd.read_csv(A__ , sep='''\t''' , header=A__ )
for answer_list in data[1]:
lowercase__: Dict = ast.literal_eval(A__ )
answers.append(A__ )
else:
lowercase__: List[str] = [line.strip() for line in open(A__ , '''r''' ).readlines()]
lowercase__: str = [[reference] for reference in references]
lowercase__: Optional[Any] = 0
for prediction, ground_truths in zip(A__ , A__ ):
total += 1
em += metric_max_over_ground_truths(A__ , A__ , A__ )
fa += metric_max_over_ground_truths(A__ , A__ , A__ )
lowercase__: Any = 1_0_0.0 * em / total
lowercase__: Any = 1_0_0.0 * fa / total
logger.info(F"""F1: {fa:.2f}""" )
logger.info(F"""EM: {em:.2f}""" )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
lowercase__: List[str] = args.k
lowercase__: Any = [line.strip() for line in open(A__ , '''r''' ).readlines()]
lowercase__: Dict = [line.strip() for line in open(A__ , '''r''' ).readlines()]
lowercase__: Any = 0
for hypo, reference in zip(A__ , A__ ):
lowercase__: int = set(hypo.split('''\t''' )[:k] )
lowercase__: Any = set(reference.split('''\t''' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
lowercase__: Any = 1_0_0.0 * em / total
logger.info(F"""Precision@{k}: {em: .2f}""" )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
def strip_title(__UpperCAmelCase ):
if title.startswith('''\"''' ):
lowercase__: List[Any] = title[1:]
if title.endswith('''\"''' ):
lowercase__: Optional[int] = title[:-1]
return title
lowercase__: Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
A__ , return_tensors='''pt''' , padding=A__ , truncation=A__ , )['''input_ids'''].to(args.device )
lowercase__: List[Any] = rag_model.rag.question_encoder(A__ )
lowercase__: Dict = question_enc_outputs[0]
lowercase__: str = rag_model.retriever(
A__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
lowercase__: str = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
lowercase__: int = []
for docs in all_docs:
lowercase__: Optional[int] = [strip_title(A__ ) for title in docs['''title''']]
provenance_strings.append('''\t'''.join(A__ ) )
return provenance_strings
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
with torch.no_grad():
lowercase__: Union[str, Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
A__ , return_tensors='''pt''' , padding=A__ , truncation=A__ )
lowercase__: Any = inputs_dict.input_ids.to(args.device )
lowercase__: Optional[Any] = inputs_dict.attention_mask.to(args.device )
lowercase__: Optional[int] = rag_model.generate( # rag_model overwrites generate
A__ , attention_mask=A__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=A__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
lowercase__: Dict = rag_model.retriever.generator_tokenizer.batch_decode(A__ , skip_special_tokens=A__ )
if args.print_predictions:
for q, a in zip(A__ , A__ ):
logger.info('''Q: {} - A: {}'''.format(A__ , A__ ) )
return answers
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
lowercase__: Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=A__ , help=(
'''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
''' model_name_or_path'''
) , )
parser.add_argument(
'''--index_name''' , default=A__ , choices=['''exact''', '''compressed''', '''legacy'''] , type=A__ , help='''RAG model retriever type''' , )
parser.add_argument(
'''--index_path''' , default=A__ , type=A__ , help='''Path to the retrieval index''' , )
parser.add_argument('''--n_docs''' , default=5 , type=A__ , help='''Number of retrieved docs''' )
parser.add_argument(
'''--model_name_or_path''' , default=A__ , type=A__ , required=A__ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=A__ , help=(
'''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
''' precision@k.'''
) , )
parser.add_argument('''--k''' , default=1 , type=A__ , help='''k for the precision@k calculation''' )
parser.add_argument(
'''--evaluation_set''' , default=A__ , type=A__ , required=A__ , help='''Path to a file containing evaluation samples''' , )
parser.add_argument(
'''--gold_data_path''' , default=A__ , type=A__ , required=A__ , help='''Path to a tab-separated file with gold samples''' , )
parser.add_argument(
'''--gold_data_mode''' , default='''qa''' , type=A__ , choices=['''qa''', '''ans'''] , help=(
'''Format of the gold data file'''
'''qa - a single line in the following format: question [tab] answer_list'''
'''ans - a single line of the gold file contains the expected answer string'''
) , )
parser.add_argument(
'''--predictions_path''' , type=A__ , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
parser.add_argument(
'''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
parser.add_argument(
'''--eval_batch_size''' , default=8 , type=A__ , help='''Batch size per GPU/CPU for evaluation.''' , )
parser.add_argument(
'''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
parser.add_argument(
'''--num_beams''' , default=4 , type=A__ , help='''Number of beams to be used when generating answers''' , )
parser.add_argument('''--min_length''' , default=1 , type=A__ , help='''Min length of the generated answers''' )
parser.add_argument('''--max_length''' , default=5_0 , type=A__ , help='''Max length of the generated answers''' )
parser.add_argument(
'''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
parser.add_argument(
'''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
lowercase__: Tuple = parser.parse_args()
lowercase__: str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
return args
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int:
lowercase__: Tuple = {}
if args.model_type is None:
lowercase__: Dict = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('''rag''' ):
lowercase__: Any = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
lowercase__: Dict = args.n_docs
if args.index_name is not None:
lowercase__: Optional[int] = args.index_name
if args.index_path is not None:
lowercase__: List[Any] = args.index_path
else:
lowercase__: Optional[int] = BartForConditionalGeneration
lowercase__: Dict = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('''Evaluate the following checkpoints: %s''' , A__ )
lowercase__: Optional[int] = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
lowercase__: Any = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
score_fn(A__ , args.predictions_path , args.gold_data_path )
continue
logger.info('''***** Running evaluation for {} *****'''.format(A__ ) )
logger.info(''' Batch size = %d''' , args.eval_batch_size )
logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
if args.model_type.startswith('''rag''' ):
lowercase__: List[Any] = RagRetriever.from_pretrained(A__ , **A__ )
lowercase__: Any = model_class.from_pretrained(A__ , retriever=A__ , **A__ )
model.retriever.init_retrieval()
else:
lowercase__: List[Any] = model_class.from_pretrained(A__ , **A__ )
model.to(args.device )
with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
lowercase__: Union[str, Any] = []
for line in tqdm(A__ ):
questions.append(line.strip() )
if len(A__ ) == args.eval_batch_size:
lowercase__: Any = evaluate_batch_fn(A__ , A__ , A__ )
preds_file.write('''\n'''.join(A__ ) + '''\n''' )
preds_file.flush()
lowercase__: Tuple = []
if len(A__ ) > 0:
lowercase__: Optional[Any] = evaluate_batch_fn(A__ , A__ , A__ )
preds_file.write('''\n'''.join(A__ ) )
preds_file.flush()
score_fn(A__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__A = get_args()
main(args)
| 177 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : str , A__ : Any , A__ : Dict , A__ : Any=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving model to {ckpt_dir}' )
__lowerCamelCase = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : Dict , A__ : int , A__ : List[str] , A__ : Any=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = (
os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
__lowerCamelCase = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
__lowerCamelCase = state_dict["""model"""]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(A__ )
def lowerCamelCase__ ( A__ : List[str] , A__ : List[str] , A__ : str , A__ : Dict , A__ : Optional[Any] , A__ : Optional[int]=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = FSDP.optim_state_dict(A__ , A__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(A__ , A__ )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
__lowerCamelCase = os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : List[str] , A__ : int , A__ : Any , A__ : Union[str, Any] , A__ : List[Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = None
            # below check should work but currently it isn't working (mostly pytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
__lowerCamelCase = (
os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
__lowerCamelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(A__ ) , )
__lowerCamelCase = optim_state["""optimizer"""]
logger.info(f'Optimizer loaded from {ckpt_dir}' )
__lowerCamelCase = FSDP.optim_state_dict_to_load(A__ , A__ , A__ )
optimizer.load_state_dict(A__ )
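# --- Editor's usage sketch (illustrative; not part of the original file) ---
# These helpers are normally reached indirectly: with an FSDP plugin configured
# (e.g. via `accelerate config`), Accelerate's checkpointing entry points dispatch here:
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   model, optimizer = accelerator.prepare(model, optimizer)
#   accelerator.save_state("ckpt_dir")   # -> the FSDP model/optimizer save paths above
#   accelerator.load_state("ckpt_dir")   # -> the FSDP model/optimizer load paths above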
| 12 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 298 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Tuple = ShapEImgaImgPipeline
UpperCAmelCase__ : Optional[Any] = ['image']
UpperCAmelCase__ : int = ['image']
UpperCAmelCase__ : Any = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
UpperCAmelCase__ : int = False
@property
def lowerCAmelCase__ ( self: int ):
return 32
@property
def lowerCAmelCase__ ( self: List[str] ):
return 32
@property
def lowerCAmelCase__ ( self: Any ):
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: Dict ):
return 8
@property
def lowerCAmelCase__ ( self: int ):
torch.manual_seed(0 )
__lowerCamelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCamelCase = CLIPVisionModel(UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def lowerCAmelCase__ ( self: Tuple ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowerCamelCase = PriorTransformer(**UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self: List[Any] ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowerCamelCase = ShapERenderer(**UpperCamelCase_ )
return model
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = self.dummy_prior
__lowerCamelCase = self.dummy_image_encoder
__lowerCamelCase = self.dummy_image_processor
__lowerCamelCase = self.dummy_renderer
__lowerCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , )
__lowerCamelCase = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict=0 ):
__lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = """cpu"""
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
__lowerCamelCase = output.images[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: List[str] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = torch_device == """cpu"""
__lowerCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
__lowerCamelCase = batch_size * [inputs[key]]
__lowerCamelCase = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
__lowerCamelCase = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
__lowerCamelCase = pipe(
UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 12 | 0 |
'''simple docstring'''
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
UpperCamelCase_ = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
UpperCamelCase_ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
UpperCamelCase_ = """zero2"""
UpperCamelCase_ = """zero3"""
UpperCamelCase_ = [ZEROa, ZEROa]
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] ) -> List[str]:
_lowerCAmelCase : Tuple = parameterized.to_safe_name("""_""".join(str(A__ ) for x in param.args ) )
return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
UpperCamelCase_ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class a_ (__lowerCamelCase ):
@parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
self.run_and_check(
stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
self.run_and_check(
stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , )
@parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
self.run_and_check(
stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , )
@require_torch_multi_gpu
@parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
self.run_and_check(
stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , )
def __UpperCamelCase ( self , snake_case_ ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ = 1_0 , snake_case_ = True , snake_case_ = True , snake_case_ = True , ):
_lowerCAmelCase : Tuple = models[model]
_lowerCAmelCase : Dict = self.run_trainer(
stage=UpperCamelCase_ , model_name=UpperCamelCase_ , eval_steps=UpperCamelCase_ , num_train_epochs=1 , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , )
self.do_checks(UpperCamelCase_ )
return output_dir
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ = 1_0 , snake_case_ = 1 , snake_case_ = True , snake_case_ = True , ):
_lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir("""./xxx""" , after=UpperCamelCase_ )
_lowerCAmelCase : int = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(UpperCamelCase_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_lowerCAmelCase : Optional[Any] = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
_lowerCAmelCase : Any = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
_lowerCAmelCase : Union[str, Any] = self.get_launcher(UpperCamelCase_ )
_lowerCAmelCase : Optional[int] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(UpperCamelCase_ , env=self.get_env() )
return output_dir
def __UpperCamelCase ( self , snake_case_=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
_lowerCAmelCase : Optional[int] = min(2 , get_gpu_count() ) if distributed else 1
return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
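# --- Editor's sketch of the assembled launch command (illustrative placeholders) ---
# For stage="zero2", model="base", distributed=True, fp16=True the test roughly runs:
#   deepspeed --num_nodes 1 --num_gpus 2 <examples>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ... --fp16 \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json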
| 309 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width( height: int , width: int , scale_factor: int = 8 ):
'''simple docstring'''
__lowerCamelCase = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__lowerCamelCase = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
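# Editor's sanity sketch (illustrative, not from the source): with the default
# scale_factor of 8 each dimension is rounded up to a multiple of 64 (8**2):
#   downscale_height_and_width(768, 768) -> (768, 768)   # 768 is already 12 * 64
#   downscale_height_and_width(770, 770) -> (832, 832)   # rounded up to 13 * 64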
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: VQModel , ):
super().__init__()
self.register_modules(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
__lowerCamelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: int ):
if latents is None:
__lowerCamelCase = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__lowerCamelCase = latents.to(UpperCamelCase_ )
__lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
__lowerCamelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int]=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCamelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowerCamelCase, __lowerCamelCase = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
__lowerCamelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self: int ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self._execution_device
__lowerCamelCase = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
__lowerCamelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__lowerCamelCase = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = hint.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
__lowerCamelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
__lowerCamelCase = self.scheduler.timesteps
__lowerCamelCase = self.movq.config.latent_channels
__lowerCamelCase, __lowerCamelCase = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
__lowerCamelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase = {"""image_embeds""": image_embeds, """hint""": hint}
__lowerCamelCase = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCamelCase, __lowerCamelCase = noise_pred.chunk(2 )
__lowerCamelCase, __lowerCamelCase = variance_pred.chunk(2 )
__lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
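                # (Editor's comment) the UNet predicts [epsilon, learned variance]; guidance is
                # applied to the epsilon chunk only, then the text-branch variance is re-attached
                # before the scheduler step.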
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
__lowerCamelCase = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__lowerCamelCase = image * 0.5 + 0.5
__lowerCamelCase = image.clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 12 | 0 |
# Lint as: python3
import itertools
import os
import re
__lowerCAmelCase : Dict = re.compile(R'([A-Z]+)([A-Z][a-z])')
__lowerCAmelCase : Dict = re.compile(R'([a-z\d])([A-Z])')
__lowerCAmelCase : Optional[Any] = re.compile(R'(?<!_)_(?!_)')
__lowerCAmelCase : Tuple = re.compile(R'(_{2,})')
__lowerCAmelCase : Optional[Any] = R'^\w+(\.\w+)*$'
__lowerCAmelCase : Optional[Any] = R'<>:/\|?*'
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = _uppercase_uppercase_re.sub(R"""\1_\2""", A__ )
__magic_name__ = _lowercase_uppercase_re.sub(R"""\1_\2""", A__ )
return name.lower()
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = _single_underscore_re.split(A__ )
__magic_name__ = [_multiple_underscores_re.split(A__ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(A__ ) if n != """""" )
def a__ ( A_ ):
'''simple docstring'''
if os.path.basename(A__ ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(A__ )
def a__ ( A_, A_ ):
'''simple docstring'''
if os.path.basename(A__ ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re, A__ ):
raise ValueError(f'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''' )
return f'''{filename_prefix_for_name(A__ )}-{split}'''
def a__ ( A_, A_, A_, A_=None ):
'''simple docstring'''
__magic_name__ = filename_prefix_for_split(A__, A__ )
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
__magic_name__ = os.path.join(A__, A__ )
return f'''{filepath}*'''
def a__ ( A_, A_, A_, A_=None, A_=None ):
'''simple docstring'''
__magic_name__ = filename_prefix_for_split(A__, A__ )
__magic_name__ = os.path.join(A__, A__ )
if shard_lengths:
__magic_name__ = len(A__ )
__magic_name__ = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(A__ )]
if filetype_suffix:
__magic_name__ = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
__magic_name__ = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
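# A self-contained restatement of the two-regex camelCase -> snake_case trick
# above, with readable names; behaviour should match the helpers in this
# module, but treat it as an illustrative sketch rather than the library API.
import re as _re_example
_upper_upper = _re_example.compile(r"([A-Z]+)([A-Z][a-z])")  # "HTTPRequest" -> "HTTP_Request"
_lower_upper = _re_example.compile(r"([a-z\d])([A-Z])")      # "camelCase"   -> "camel_Case"
def to_snake_case(name: str) -> str:
    name = _upper_upper.sub(r"\1_\2", name)
    name = _lower_upper.sub(r"\1_\2", name)
    return name.lower()
assert to_snake_case("CamelCaseName") == "camel_case_name"
assert to_snake_case("HTTPRequest") == "http_request"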
| 88 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase__( unittest.TestCase):
def __init__( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: int=2 , UpperCamelCase_: Optional[Any]=56 , UpperCamelCase_: Tuple=True , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: str=True , UpperCamelCase_: str=99 , UpperCamelCase_: Tuple=32 , UpperCamelCase_: int=2 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Optional[int]="gelu_new" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: List[Any]=5_12 , UpperCamelCase_: Union[str, Any]=16 , UpperCamelCase_: int=2 , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Union[str, Any]="block_sparse" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Any=2 , UpperCamelCase_: int=3 , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_choices
__lowerCamelCase = rescale_embeddings
__lowerCamelCase = attention_type
__lowerCamelCase = use_bias
__lowerCamelCase = block_size
__lowerCamelCase = num_random_blocks
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs
__lowerCamelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: Optional[Any] ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[str] ):
super().test_hidden_states_output()
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = model_class(UpperCamelCase_ )
@jax.jit
def model_jitted(UpperCamelCase_: Tuple , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] ):
return model(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , **UpperCamelCase_ )
with self.subTest("""JIT Enabled""" ):
__lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Dict=1E-5 , UpperCamelCase_: List[str]="outputs" , UpperCamelCase_: List[str]=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
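# The JIT-enabled / JIT-disabled comparison exercised above generalises to any
# jax function; a tiny sketch of the same pattern on a toy computation,
# assuming jax is installed.
import jax
import jax.numpy as jnp
@jax.jit
def toy_forward(x):
    return jnp.tanh(x) * 2.0
x = jnp.ones((2, 3))
jitted_out = toy_forward(x)      # compiled path
with jax.disable_jit():
    eager_out = toy_forward(x)   # interpreted path, same numerics
assert jnp.allclose(jitted_out, eager_out)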
| 12 | 0 |
'''simple docstring'''
class __A :
'''simple docstring'''
def __init__(self , A ) -> Union[str, Any]:
"""simple docstring"""
_a = n
_a = [None] * self.n
_a = 0 # index of the first element
_a = 0
_a = 0
def __len__(self ) -> str:
"""simple docstring"""
return self.size
def a__ (self ) -> Any:
"""simple docstring"""
return self.size == 0
def a__ (self ) -> List[Any]:
"""simple docstring"""
return False if self.is_empty() else self.array[self.front]
def a__ (self , A ) -> List[Any]:
"""simple docstring"""
if self.size >= self.n:
raise Exception('''QUEUE IS FULL''' )
_a = data
_a = (self.rear + 1) % self.n
self.size += 1
return self
def a__ (self ) -> Any:
"""simple docstring"""
if self.size == 0:
raise Exception('''UNDERFLOW''' )
_a = self.array[self.front]
_a = None
_a = (self.front + 1) % self.n
self.size -= 1
return temp
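# A readable sketch of the same fixed-size ring buffer, with distinct method
# names so it can actually be exercised; the capacity and overflow semantics
# mirror the class above, but the naming here is illustrative.
class RingBuffer:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.data = [None] * capacity
        self.front = 0   # index of the oldest element
        self.size = 0
    def enqueue(self, item) -> None:
        if self.size == self.capacity:
            raise OverflowError("QUEUE IS FULL")
        self.data[(self.front + self.size) % self.capacity] = item
        self.size += 1
    def dequeue(self):
        if self.size == 0:
            raise IndexError("UNDERFLOW")
        item, self.data[self.front] = self.data[self.front], None
        self.front = (self.front + 1) % self.capacity
        self.size -= 1
        return item
q = RingBuffer(2)
q.enqueue("a")
q.enqueue("b")
assert q.dequeue() == "a" and q.dequeue() == "b"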
| 211 |
def lowerCamelCase__ ( A__ : list ):
'''simple docstring'''
__lowerCamelCase = len(A__ )
    for pass_num in range(__lowerCamelCase ):
        for i in range(pass_num % 2 , __lowerCamelCase - 1 , 2 ):
if arr[i + 1] < arr[i]:
__lowerCamelCase, __lowerCamelCase = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
UpperCAmelCase_ = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 12 | 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def _UpperCamelCase ( UpperCamelCase__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _UpperCamelCase ( ):
UpperCAmelCase__ : List[Any] = 2
while True:
if is_prime(A__ ):
yield num
num += 1
def _UpperCamelCase ( UpperCamelCase__ = 2_0_0_0_0_0_0 ):
    return sum(takewhile(lambda x : x < UpperCamelCase__ , prime_generator() ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 163 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Any , UpperCamelCase_: str , UpperCamelCase_: Dict ):
__lowerCamelCase = question_encoder
__lowerCamelCase = generator
__lowerCamelCase = self.question_encoder
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[Any] ):
if os.path.isfile(UpperCamelCase_ ):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
__lowerCamelCase = os.path.join(UpperCamelCase_ , """question_encoder_tokenizer""" )
__lowerCamelCase = os.path.join(UpperCamelCase_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(UpperCamelCase_ )
self.generator.save_pretrained(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: List[Any] , UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
__lowerCamelCase = kwargs.pop("""config""" , UpperCamelCase_ )
if config is None:
__lowerCamelCase = RagConfig.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(
UpperCamelCase_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
__lowerCamelCase = AutoTokenizer.from_pretrained(
UpperCamelCase_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=UpperCamelCase_ , generator=UpperCamelCase_ )
def __call__( self: Tuple , *UpperCamelCase_: int , **UpperCamelCase_: int ):
return self.current_tokenizer(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , *UpperCamelCase_: List[Any] , **UpperCamelCase_: List[Any] ):
return self.generator.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , *UpperCamelCase_: str , **UpperCamelCase_: Union[str, Any] ):
return self.generator.decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.question_encoder
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.generator
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "longest" , UpperCamelCase_: str = None , UpperCamelCase_: bool = True , **UpperCamelCase_: int , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , UpperCamelCase_ , )
if max_length is None:
__lowerCamelCase = self.current_tokenizer.model_max_length
__lowerCamelCase = self(
UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__lowerCamelCase = self.current_tokenizer.model_max_length
__lowerCamelCase = self(
text_target=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = labels["""input_ids"""]
return model_inputs
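# Usage sketch for the composite tokenizer above: facebook/rag-token-nq is a
# real checkpoint, but network access and an installed transformers are
# assumed here, so treat this as illustrative.
from transformers import RagTokenizer
rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
batch = rag_tokenizer("who wrote hamlet?", return_tensors="pt")
# save_pretrained writes two subfolders:
#   question_encoder_tokenizer/  and  generator_tokenizer/
rag_tokenizer.save_pretrained("./rag_tokenizer_ckpt")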
| 12 | 0 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowerCAmelCase__ : Any = logging.get_logger(__name__)
lowerCAmelCase__ : List[str] = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
lowerCAmelCase__ : int = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
lowerCAmelCase__ : Optional[Any] = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
lowerCAmelCase__ : str = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
lowerCAmelCase__ : Any = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
lowerCAmelCase__ : Any = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
lowerCAmelCase__ : List[str] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
lowerCAmelCase__ : Optional[Any] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
lowerCAmelCase__ : Union[str, Any] = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
lowerCAmelCase__ : Tuple = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
lowerCAmelCase__ : List[Any] = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
lowerCAmelCase__ : Dict = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
lowerCAmelCase__ : int = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
lowerCAmelCase__ : Optional[Any] = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
lowerCAmelCase__ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowerCAmelCase__ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowerCAmelCase__ : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowerCAmelCase__ : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowerCAmelCase__ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase__ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowerCAmelCase__ : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowerCAmelCase__ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase__ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowerCAmelCase__ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase__ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowerCAmelCase__ : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowerCAmelCase__ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowerCAmelCase__ : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_MAPPING
lowerCAmelCase__ : Dict = auto_class_update(FlaxAutoModel)
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowerCAmelCase__ : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase__ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase__ : int = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase__ : Any = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase__ : str = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowerCAmelCase__ : Optional[int] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCAmelCase__ : List[str] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowerCAmelCase__ : Optional[Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowerCAmelCase__ : str = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase__ : Dict = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase__ : str = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class snake_case ( _BaseAutoModelClass ):
"""simple docstring"""
snake_case__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowerCAmelCase__ : List[Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
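# Usage sketch for the lazy mappings above: the auto class resolves a config
# type to its Flax implementation at call time. Assumes flax is installed and
# the stock bert-base-cased checkpoint is reachable.
import numpy as np
from transformers import FlaxAutoModel
model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel
outputs = model(input_ids=np.ones((1, 8), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 8, 768)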
| 98 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCAmelCase_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
UpperCAmelCase_ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
UpperCAmelCase_ = 0
for log in Path().glob('*.log'):
UpperCAmelCase_ = 0
with open(log, 'r') as f:
for line in f:
UpperCAmelCase_ = json.loads(line)
if line.get('nodeid', '') != "":
UpperCAmelCase_ = line['nodeid']
if line.get('duration', None) is not None:
UpperCAmelCase_ = f"""{line["duration"]:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCAmelCase_ = []
log.unlink()
UpperCAmelCase_ = ''
UpperCAmelCase_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
for test in failed_tests:
UpperCAmelCase_ = test[0].split('::')
UpperCAmelCase_ = data[0].split('/')[-1]
if data[0] not in filesafailed:
UpperCAmelCase_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCAmelCase_ = [test[0] for test in failed_table]
UpperCAmelCase_ = list(set(files))
# Count number of instances in failed_tests
UpperCAmelCase_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCAmelCase_ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
UpperCAmelCase_ = 'Too many failed tests, please see the full report in the Action results.'
UpperCAmelCase_ = len(err) + 10
UpperCAmelCase_ = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
UpperCAmelCase_ = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
UpperCAmelCase_ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCAmelCase_ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCAmelCase_ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
UpperCAmelCase_ = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase_ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase_ = row[0]
else:
UpperCAmelCase_ = ''
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
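# A tiny sketch of the two building blocks the report script combines:
# tabulate's github-style table and a Slack Block Kit "section" payload.
# The failure rows below are made-up illustrations, not this repo's output.
from tabulate import tabulate
failures = [["tests/test_foo.py", 3], ["tests/test_bar.py", 1]]
table_md = tabulate(failures, headers=["Test Location", "Num Failed"], tablefmt="github")
block = {"type": "section", "text": {"type": "mrkdwn", "text": f"```\n{table_md}\n```"}}
print(block["text"]["text"])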
| 12 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ : List[Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[Any] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
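# The _LazyModule pattern above defers heavy imports until attribute access;
# a module-level sketch of the same idea using PEP 562's __getattr__
# (Python >= 3.7). The mapping below is illustrative.
import importlib
_LAZY_ATTRS = {"VisionEncoderDecoderModel": ".modeling_vision_encoder_decoder"}
def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")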
| 143 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
@register_to_config
def __init__( self: Optional[Any] , UpperCamelCase_: bool , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None ):
super().__init__()
__lowerCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase = torch.zeros(UpperCamelCase_ , UpperCamelCase_ )
else:
__lowerCamelCase = None
__lowerCamelCase = torch.nn.Parameter(UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : VQModel
UpperCAmelCase__ : CLIPTextModel
UpperCAmelCase__ : CLIPTokenizer
UpperCAmelCase__ : TransformeraDModel
UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__ : VQDiffusionScheduler
def __init__( self: str , UpperCamelCase_: VQModel , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: TransformeraDModel , UpperCamelCase_: VQDiffusionScheduler , UpperCamelCase_: LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=UpperCamelCase_ , transformer=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 )
else:
__lowerCamelCase = [""""""] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: Tuple , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 5.0 , UpperCamelCase_: float = 1.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = len(UpperCamelCase_ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase_ )}.' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ )
__lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ )
__lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ):
__lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ )
__lowerCamelCase = torch.exp(UpperCamelCase_ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
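# A toy-sized sketch of the truncation above: keep the smallest set of classes
# whose cumulative probability stays under truncation_rate (always keeping the
# argmax) and send the rest to log(0). Shapes here are (batch, classes) for
# readability; the pipeline's tensors carry an extra pixel axis, and the exact
# threshold handling is simplified.
import torch
def truncate_log_probs(log_p: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    sorted_log_p, indices = torch.sort(log_p, dim=1, descending=True)
    keep = sorted_log_p.exp().cumsum(dim=1) < truncation_rate
    keep[:, 0] = True                          # never drop the most likely class
    keep = keep.gather(1, indices.argsort(1))  # undo the sort
    out = log_p.clone()
    out[~keep] = float("-inf")
    return out
log_p = torch.log(torch.tensor([[0.7, 0.2, 0.1]]))
print(truncate_log_probs(log_p, truncation_rate=0.5))  # only the 0.7 class survives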
| 12 | 0 |
'''simple docstring'''
import os
import string
import sys
a__ : str = 1 << 8
a__ : Optional[int] = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 2_7,
"up": 6_5 + ARROW_KEY_FLAG,
"down": 6_6 + ARROW_KEY_FLAG,
"right": 6_7 + ARROW_KEY_FLAG,
"left": 6_8 + ARROW_KEY_FLAG,
"mod_int": 9_1,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 5_0,
"delete": 5_1,
"pg_up": 5_3,
"pg_down": 5_4,
}
a__ : Optional[int] = KEYMAP["up"]
a__ : int = KEYMAP["left"]
if sys.platform == "win32":
a__ : List[Any] = []
a__ : List[Any] = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(1_0):
a__ : Any = ord(str(i))
def snake_case ( )-> Dict:
"""simple docstring"""
if os.name == "nt":
import msvcrt
__A = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(A__ ) == 0:
# Read the keystroke
__A = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__A = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__A = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(A__ )
if ord(A__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_2_6 ) )
__A = chr(KEYMAP['esc'] )
except KeyError:
__A = cha[1]
else:
__A = ch.decode(A__ )
else:
__A = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__A = sys.stdin.fileno()
__A = termios.tcgetattr(A__ )
try:
tty.setraw(A__ )
__A = sys.stdin.read(1 )
finally:
termios.tcsetattr(A__ , termios.TCSADRAIN , A__ )
return ch
def snake_case ( )-> Union[str, Any]:
"""simple docstring"""
__A = get_raw_chars()
if ord(A__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(A__ ) == KEYMAP["esc"]:
__A = get_raw_chars()
if ord(A__ ) == KEYMAP["mod_int"]:
__A = get_raw_chars()
if ord(A__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(A__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(A__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 161 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = DistilBertTokenizer
UpperCAmelCase__ : Dict = DistilBertTokenizerFast
UpperCAmelCase__ : Tuple = True
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
__lowerCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
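# A minimal illustration of the single- vs pair-sequence layouts the
# assertions above check: BERT-style inputs are [CLS] A [SEP] and
# [CLS] A [SEP] B [SEP]. The token ids below are illustrative stand-ins.
CLS, SEP = 101, 102
seq_a, seq_b = [7, 8], [9]
single = [CLS] + seq_a + [SEP]
pair = [CLS] + seq_a + [SEP] + seq_b + [SEP]
assert single == [101, 7, 8, 102] and pair == [101, 7, 8, 102, 9, 102]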
| 12 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size', [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size', ['default', 0, 100 * 2**20, 900 * 2**20] )
def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : str, lowerCAmelCase_ : str ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config, 'IN_MEMORY_MAX_SIZE', A__ )
__lowerCAmelCase = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__lowerCAmelCase = dataset_size < in_memory_max_size
else:
__lowerCAmelCase = False
__lowerCAmelCase = is_small_dataset(A__ )
assert result == expected
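# The parametrised test above reduces to a single comparison; a sketch of the
# underlying rule, with illustrative sizes in bytes.
def fits_in_memory(dataset_size, max_size) -> bool:
    # a zero/None max size disables in-memory loading entirely
    return bool(dataset_size and max_size and dataset_size < max_size)
assert fits_in_memory(400 * 2**20, 900 * 2**20) is True
assert fits_in_memory(600 * 2**20, 0) is False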
| 284 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(A__ )
__lowerCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any] , A__ : Tuple , A__ : Optional[Any] ):
'''simple docstring'''
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCamelCase, __lowerCamelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = args.model_name_or_path
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__lowerCamelCase = 0
__lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
__lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowerCamelCase = int(A__ ) + 1
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowerCamelCase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowerCamelCase = f'epoch_{epoch}'
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowerCamelCase = accuracy
__lowerCamelCase = lr_scheduler.get_lr()[0]
__lowerCamelCase = optimizer.param_groups[0]["""lr"""]
__lowerCamelCase = epoch
__lowerCamelCase = overall_step
accelerator.print(f'epoch {epoch}:' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(A__ , A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
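# The checkpoint round-trip above boils down to two Accelerate calls; a
# minimal sketch assuming accelerate is installed. The directory name is
# illustrative.
from accelerate import Accelerator
import torch
accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state("ckpt/epoch_0")  # saves model, optimizer, and RNG states
accelerator.load_state("ckpt/epoch_0")  # restores everything registered above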
| 12 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : List[Any] = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class __lowercase ( __lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase : Dict = 'bart'
_UpperCAmelCase : Dict = ['past_key_values']
_UpperCAmelCase : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : str , lowerCAmelCase__ : Any=5_0265 , lowerCAmelCase__ : Dict=1024 , lowerCAmelCase__ : Optional[int]=12 , lowerCAmelCase__ : Dict=4096 , lowerCAmelCase__ : List[str]=16 , lowerCAmelCase__ : Union[str, Any]=12 , lowerCAmelCase__ : Union[str, Any]=4096 , lowerCAmelCase__ : List[Any]=16 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : Any="gelu" , lowerCAmelCase__ : List[Any]=1024 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : List[str]=0.0 , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : str=0.02 , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : Dict=False , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Union[str, Any]=2 , lowerCAmelCase__ : List[Any]=2 , **lowerCAmelCase__ : int , ):
SCREAMING_SNAKE_CASE_: Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE_: List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_: str = d_model
SCREAMING_SNAKE_CASE_: int = encoder_ffn_dim
SCREAMING_SNAKE_CASE_: int = encoder_layers
SCREAMING_SNAKE_CASE_: Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE_: List[str] = decoder_ffn_dim
SCREAMING_SNAKE_CASE_: Tuple = decoder_layers
SCREAMING_SNAKE_CASE_: List[str] = decoder_attention_heads
SCREAMING_SNAKE_CASE_: str = dropout
SCREAMING_SNAKE_CASE_: List[Any] = attention_dropout
SCREAMING_SNAKE_CASE_: List[Any] = activation_dropout
SCREAMING_SNAKE_CASE_: int = activation_function
SCREAMING_SNAKE_CASE_: Optional[int] = init_std
SCREAMING_SNAKE_CASE_: int = encoder_layerdrop
SCREAMING_SNAKE_CASE_: Any = decoder_layerdrop
SCREAMING_SNAKE_CASE_: List[Any] = classifier_dropout
SCREAMING_SNAKE_CASE_: Optional[Any] = use_cache
SCREAMING_SNAKE_CASE_: Tuple = encoder_layers
SCREAMING_SNAKE_CASE_: Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , forced_eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , UpperCamelCase_):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.bos_token_id
warnings.warn(
F"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
"The config can simply be saved and uploaded again to be fixed.")
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for BART."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
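
# Standalone sketch (not part of the original module) of the past_key_values shape rule
# used above: each cached key/value tensor is (batch, num_heads, past_length,
# hidden_size // num_heads). The sample numbers below are illustrative only.
def past_key_value_shape(batch: int, num_heads: int, past_length: int, hidden_size: int) -> tuple:
    head_dim = hidden_size // num_heads  # per-head slice of the model width
    return (batch, num_heads, past_length, head_dim)


# e.g. batch=2, 16 heads, 11 cached positions, d_model=1024 -> head_dim 64
assert past_key_value_shape(2, 16, 11, 1024) == (2, 16, 11, 64)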
| 13 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
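
# Minimal sketch of the registration pattern the tests above exercise: pair a config
# class with a feature extractor class so the auto-API can resolve it. It reuses the
# CustomConfig/CustomFeatureExtractor fixtures imported at the top, and the cleanup
# mirrors the `finally` blocks in the tests.
def register_and_cleanup_custom_feature_extractor():
    try:
        AutoConfig.register("custom", CustomConfig)
        AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
    finally:
        CONFIG_MAPPING._extra_content.pop("custom", None)
        FEATURE_EXTRACTOR_MAPPING._extra_content.pop(CustomConfig, None)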
| 12 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
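
# Standalone sketch of why the tests above construct generators explicitly: seeding a
# local torch.Generator (instead of the global RNG) makes every pipeline call
# reproducible and independent of other random draws in the process.
def make_seeded_generator(seed: int = 0, device: str = "cpu") -> torch.Generator:
    return torch.Generator(device=device).manual_seed(seed)


assert torch.equal(
    torch.randn(3, generator=make_seeded_generator(0)),
    torch.randn(3, generator=make_seeded_generator(0)),
)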
| 177 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__(self, dataset_name: str, cache_dir: str, version: Union[Version, str], config: Optional[str] = None, use_local_dummy_data: bool = False, load_existing_dummy_data: bool = True, download_callbacks: Optional[List[Callable]] = None):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
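
# Standalone sketch of the dummy-file naming rule used throughout the class above: a
# URL maps to its last path component, percent-encoded with quote_plus so query
# strings stay filesystem-safe. The sample URL is illustrative.
def dummy_name_for_url(url: str) -> str:
    return urllib.parse.quote_plus(Path(url).name)


assert dummy_name_for_url("https://host/data/train.csv?rev=2") == "train.csv%3Frev%3D2"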
| 12 | 0 |
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
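
# Small self-check of the diagonal formulas described in the comments above: queens at
# (r1, c1) and (r2, c2) share a diagonal exactly when r1 - c1 == r2 - c2 (the 45º
# family) or r1 + c1 == r2 + c2 (the 135º family).
def on_same_diagonal(r1: int, c1: int, r2: int, c2: int) -> bool:
    return r1 - c1 == r2 - c2 or r1 + c1 == r2 + c2


assert on_same_diagonal(0, 0, 3, 3)  # main diagonal
assert on_same_diagonal(0, 3, 3, 0)  # anti-diagonal
assert not on_same_diagonal(0, 1, 2, 0)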
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 298 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place queens row by row, tracking column and diagonal collisions."""
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    """Solve the n-queens puzzle and print every valid board."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 12 | 0 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
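
# Worked example of the token-overlap F1 above: gold "the cat sat" normalizes to
# ["cat", "sat"] (articles are dropped) and the prediction "cat sat down" to
# ["cat", "sat", "down"], giving precision 2/3, recall 1 and F1 = 0.8.
assert abs(compute_f1("the cat sat", "cat sat down") - 0.8) < 1e-9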
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
UpperCamelCase_ = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 309 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves to make every node hold exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
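
# Usage sketch (illustrative): a root holding 3 coins with two empty leaves needs
# exactly two moves, one coin passed down to each child.
def _demo_distribute_coins() -> None:
    example_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_root) == 2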
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | 0 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items by value/weight ratio, splitting the last one."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
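
# Usage sketch with the classic instance: values (60, 100, 120), weights (10, 20, 30)
# and capacity 50 -> take items 0 and 1 whole plus 2/3 of item 2, for a total of 240.
def _demo_fractional_knapsack() -> None:
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 2 / 3]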
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
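
# Standalone sketch of the pad rule above: dimensions are always padded up to the next
# multiple of `size`, and a dimension that is already a multiple still gains one full
# extra block of `size` (that is what `dim // size + 1` produces).
def pad_amount(dim: int, size: int) -> int:
    return (dim // size + 1) * size - dim


assert pad_amount(13, 8) == 3  # 13 -> 16
assert pad_amount(16, 8) == 8  # 16 -> 24, a full extra block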
| 12 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
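
# Worked example: with L = 10 mH and C = 5 uF, f = 1 / (2*pi*sqrt(L*C)) comes out to
# roughly 711.76 Hz. The component values are illustrative.
def _demo_resonant_frequency() -> None:
    label, frequency = resonant_frequency(inductance=0.01, capacitance=5e-6)
    assert label == "Resonant frequency"
    assert abs(frequency - 711.76) < 0.01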
if __name__ == "__main__":
import doctest
doctest.testmod()
| 211 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
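
# Usage sketch: the recursion splits [left, right] at the midpoint and keeps the larger
# of the two halves' maxima.
def _demo_find_max() -> None:
    assert find_max([3, 5, 1, 9], 0, 3) == 9
    assert find_max([-2, -7, -1], 0, 2) == -1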
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 12 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
def snake_case__ ( self):
UpperCAmelCase__ : Dict = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""])
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = BasicTokenizer(do_lower_case=UpperCamelCase_)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""])
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def snake_case__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def snake_case__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def snake_case__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def snake_case__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def snake_case__ ( self):
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=["""[UNK]"""])
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
def snake_case__ ( self):
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""")
        self.assertListEqual(tokenizer.tokenize("""""") , [])
        self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
        self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""])
def snake_case__ ( self):
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
def snake_case__ ( self):
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
def snake_case__ ( self):
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
def snake_case__ ( self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
@slow
def snake_case__ ( self):
        tokenizer = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""")
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False)
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
def snake_case__ ( self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                sentence = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , """do_lower_case""") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
    def snake_case__ ( self):
        list_of_commun_chinese_char = ["""的""", """人""", """有"""]
        word = """""".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                kwargs["""tokenize_chinese_chars"""] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(word , add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(word , add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char)
                kwargs["""tokenize_chinese_chars"""] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(word , add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(word , add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens)
| 163 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCamelCase__( __lowerCamelCase):
    def setUp( self: List[Any] ):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""
    def _setup_pt_ckpt( self: Optional[Any] , model_path: Optional[Any] ):
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt( self: Tuple , model_path: Union[str, Any] ):
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
    def test_framework_provided( self: Union[str, Any] ):
        mock_framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_provided( self: int ):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self: Optional[int] ):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                FeaturesManager.determine_framework(self.test_model )
| 12 | 0 |
"""simple docstring"""
def a_ ( mass , velocity ):
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
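    # Illustrative spot check (not in the original file): a 10 kg body moving
    # at 10 m/s carries 0.5 * 10 * 10**2 = 500 J of kinetic energy.
    assert a_(10, 10) == 500.0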
| 98 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation( cells : list[list[int]] ):
    '''simple docstring'''
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def generate_images( cells : list[list[int]] , frames : int ):
    '''simple docstring'''
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
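    # Illustrative sanity check (not in the original file): one generation
    # turns the vertical blinker into a horizontal one.
    assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]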
| 12 | 0 |
import argparse
import datetime
def zeller( date_input: str ) -> str:
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input ) < 11:
        raise ValueError('Must be 10 characters long' )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12' )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31' )
    # Get second separator
    sep_b = date_input[5]
    # Validate
    if sep_b not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'' )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?' )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.39 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
    # Response
    response = f"""Your date {date_input}, is a {days[str(f )]}!"""
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ : List[str] = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
lowerCAmelCase__ : Union[str, Any] = parser.parse_args()
zeller(args.date_input)
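    # Illustrative example (not in the original file): 01-31-2010 was a Sunday.
    # >>> zeller('01-31-2010')
    # 'Your date 01-31-2010, is a Sunday!'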
| 143 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = StableDiffusionInpaintPipeline
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : int = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase__ : Union[str, Any] = frozenset([])
def lowerCAmelCase__ ( self: str ):
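        # Descriptive note (not in the original file): build deliberately tiny
        # UNet/VAE/CLIP components so the inpainting pipeline runs quickly on
        # CPU during tests; the 9 input channels are image + mask + masked latents.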
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
__lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInpaintPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: int ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self: int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 12 | 0 |
'''simple docstring'''
import math
def fx( x: float , a: float ) -> float:
    """simple docstring"""
    return math.pow(x , 2 ) - a
def fx_derivative( x: float ) -> float:
    """simple docstring"""
    return 2 * x
def get_initial_point( a: float ) -> float:
    """simple docstring"""
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative( a: float , max_iter: int = 9_9_9_9 , tolerance: float = 0.00_0000_0000_0001 ) -> float:
    """simple docstring"""
    if a < 0:
        raise ValueError('math domain error' )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
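    # Illustrative spot check (not in the original file): Newton's method
    # recovers sqrt(4) = 2 to within the default tolerance.
    assert abs(square_root_iterative(4 ) - 2.0 ) < 1e-9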
| 161 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 12 | 0 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation( cells: list[list[int]] ):
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def generate_images( cells: list[list[int]], frames: int ):
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('RGB', (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 284 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class lowerCamelCase__:
    def __init__( self: str , model: List[Any]=None , **kwargs: str ):
        logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
        self.model = model
        self.model_save_dir = kwargs.get("""model_save_dir""" , None )
        self.latest_model_name = kwargs.get("""latest_model_name""" , ONNX_WEIGHTS_NAME )
    def __call__( self: Dict , **kwargs: Any ):
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
    @staticmethod
    def load_model( path: Union[str, Path] , provider: Tuple=None , sess_options: Tuple=None ):
        if provider is None:
            logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
            provider = """CPUExecutionProvider"""
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
    def _save_pretrained( self: int , save_directory: Union[str, Path] , file_name: Optional[str] = None , **kwargs: Optional[int] ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained( self: List[Any] , save_directory: Union[str, os.PathLike] , **kwargs: Optional[Any] , ):
        if os.path.isfile(save_directory ):
            logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
    @classmethod
    def _from_pretrained( cls: str , model_id: Union[str, Path] , use_auth_token: Optional[Union[bool, str, None]] = None , revision: Optional[Union[str, None]] = None , force_download: bool = False , cache_dir: Optional[str] = None , file_name: Optional[str] = None , provider: Optional[str] = None , sess_options: Optional["ort.SessionOptions"] = None , **kwargs: int , ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs["""model_save_dir"""] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs["""model_save_dir"""] = Path(model_cache_path ).parent
            kwargs["""latest_model_name"""] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
    @classmethod
    def from_pretrained( cls: Optional[int] , model_id: Union[str, Path] , force_download: bool = True , use_auth_token: Optional[str] = None , cache_dir: Optional[str] = None , **model_kwargs: int , ):
        revision = None
        if len(str(model_id ).split("""@""" ) ) == 2:
            model_id, revision = model_id.split("""@""" )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
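# Illustrative usage sketch (not part of the original module; the repo id and
# the "sample" input name below are assumptions):
#
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))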
| 12 | 0 |
__version__ = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 13 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
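# Illustrative usage (not part of the original module; the model ids are
# example assumptions):
#
#   from transformers import VisionEncoderDecoderModel
#   model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
#       "google/vit-base-patch16-224-in21k", "bert-base-uncased"
#   )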
| 12 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :Optional[str] = field(
default="cifar10" ,metadata={"help": "Name of a dataset from the datasets package"} )
_UpperCAmelCase :Optional[str] = field(
default=__lowerCamelCase ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
_UpperCAmelCase :Optional[str] = field(
default=__lowerCamelCase ,metadata={"help": "The column name of the images in the files."} )
_UpperCAmelCase :Optional[str] = field(default=__lowerCamelCase ,metadata={"help": "A folder containing the training data."} )
_UpperCAmelCase :Optional[str] = field(default=__lowerCamelCase ,metadata={"help": "A folder containing the validation data."} )
_UpperCAmelCase :Optional[float] = field(
default=0.15 ,metadata={"help": "Percent to split off of train for validation."} )
_UpperCAmelCase :Optional[int] = field(
default=__lowerCamelCase ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
_UpperCAmelCase :Optional[int] = field(
default=__lowerCamelCase ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
def _snake_case ( self ):
lowercase__: List[Any] = {}
if self.train_dir is not None:
lowercase__: Any = self.train_dir
if self.validation_dir is not None:
lowercase__: Dict = self.validation_dir
lowercase__: Union[str, Any] = data_files if data_files else None
@dataclass
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :str = field(
default=__lowerCamelCase ,metadata={
"help": (
"The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch."
)
} ,)
_UpperCAmelCase :Optional[str] = field(
default=__lowerCamelCase ,metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
_UpperCAmelCase :Optional[str] = field(
default=__lowerCamelCase ,metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} ,)
_UpperCAmelCase :Optional[str] = field(
default=__lowerCamelCase ,metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
_UpperCAmelCase :str = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
_UpperCAmelCase :str = field(default=__lowerCamelCase ,metadata={"help": "Name or path of preprocessor config."} )
_UpperCAmelCase :bool = field(
default=__lowerCamelCase ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
_UpperCAmelCase :float = field(
default=0.75 ,metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
_UpperCAmelCase :bool = field(
default=__lowerCamelCase ,metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class UpperCAmelCase (__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :float = field(
default=1E-3 ,metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str:
lowercase__: Any = torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
lowercase__: Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__, lowercase__, lowercase__: Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__, lowercase__, lowercase__: List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , A__ , A__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__: Dict = training_args.get_process_log_level()
logger.setLevel(A__ )
transformers.utils.logging.set_verbosity(A__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__: Tuple = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__: Any = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
lowercase__: Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__: List[str] = None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , A__ ) and data_args.train_val_split > 0.0:
lowercase__: Tuple = ds['''train'''].train_test_split(data_args.train_val_split )
lowercase__: Optional[int] = split['''train''']
lowercase__: List[str] = split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__: Any = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowercase__: Tuple = ViTMAEConfig.from_pretrained(model_args.config_name , **A__ )
elif model_args.model_name_or_path:
lowercase__: int = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **A__ )
else:
lowercase__: str = ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
lowercase__: Union[str, Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **A__ )
elif model_args.model_name_or_path:
lowercase__: Tuple = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **A__ )
else:
lowercase__: Optional[Any] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
lowercase__: Union[str, Any] = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
lowercase__: Union[str, Any] = ViTMAEForPreTraining(A__ )
if training_args.do_train:
lowercase__: int = ds['''train'''].column_names
else:
lowercase__: List[str] = ds['''validation'''].column_names
if data_args.image_column_name is not None:
lowercase__: Optional[int] = data_args.image_column_name
elif "image" in column_names:
lowercase__: Any = '''image'''
elif "img" in column_names:
lowercase__: Optional[Any] = '''img'''
else:
lowercase__: Optional[int] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowercase__: Optional[int] = image_processor.size['''shortest_edge''']
else:
lowercase__: List[str] = (image_processor.size['''height'''], image_processor.size['''width'''])
lowercase__: List[Any] = Compose(
[
Lambda(lambda __UpperCAmelCase : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(A__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__UpperCAmelCase ):
lowercase__: Optional[Any] = [transforms(A__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
lowercase__: Optional[Any] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(A__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
lowercase__: Union[str, Any] = (
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(A__ )
# Compute absolute learning rate
lowercase__: int = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
lowercase__: Optional[Any] = training_args.base_learning_rate * total_train_batch_size / 2_5_6
# Initialize our trainer
lowercase__: int = Trainer(
model=A__ , args=A__ , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=A__ , data_collator=A__ , )
# Training
if training_args.do_train:
lowercase__: Optional[int] = None
if training_args.resume_from_checkpoint is not None:
lowercase__: Tuple = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__: List[str] = last_checkpoint
lowercase__: int = trainer.train(resume_from_checkpoint=A__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__: Optional[Any] = trainer.evaluate()
trainer.log_metrics('''eval''' , A__ )
trainer.save_metrics('''eval''' , A__ )
# Write model card and (optionally) push to hub
lowercase__: List[Any] = {
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**A__ )
else:
trainer.create_model_card(**A__ )
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
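    # Typical invocation (illustrative; the flags map onto the dataclasses above):
    #   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
    #       --do_train --do_eval --base_learning_rate 1.5e-4 --mask_ratio 0.75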
| 177 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
UpperCAmelCase_ = get_logger(__name__)
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : str , A__ : Any , A__ : Dict , A__ : Any=0 ):
'''simple docstring'''
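    # Save the model's state dict under the plugin's configured state_dict_type:
    # FULL (single rank-0 file), LOCAL (one file per rank) or SHARDED (a
    # torch.distributed.checkpoint directory).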
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving model to {output_model_file}' )
torch.save(A__ , A__ )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving model to {ckpt_dir}' )
__lowerCamelCase = {"""model""": state_dict}
dist_cp.save_state_dict(
state_dict=A__ , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : Dict , A__ : int , A__ : List[str] , A__ : Any=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"""Set the `sync_module_states` flag to `True` so that model states are synced across processes when """
"""initializing FSDP object""" )
return
__lowerCamelCase = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowerCamelCase = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading model from {input_model_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowerCamelCase = (
os.path.join(A__ , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
__lowerCamelCase = {"""model""": model.state_dict()}
dist_cp.load_state_dict(
state_dict=A__ , storage_reader=dist_cp.FileSystemReader(A__ ) , planner=DefaultLoadPlanner() , )
__lowerCamelCase = state_dict["""model"""]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(A__ )
def lowerCamelCase__ ( A__ : List[str] , A__ : List[str] , A__ : str , A__ : Dict , A__ : Optional[Any] , A__ : Optional[int]=0 ):
'''simple docstring'''
os.makedirs(A__ , exist_ok=A__ )
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowerCamelCase = FSDP.optim_state_dict(A__ , A__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(A__ , A__ )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
__lowerCamelCase = os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(A__ , exist_ok=A__ )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"""optimizer""": optim_state} , storage_writer=dist_cp.FileSystemWriter(A__ ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def lowerCamelCase__ ( A__ : int , A__ : List[str] , A__ : int , A__ : Any , A__ : Union[str, Any] , A__ : List[Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowerCamelCase = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowerCamelCase = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
__lowerCamelCase = os.path.join(A__ , A__ )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
__lowerCamelCase = torch.load(A__ )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
__lowerCamelCase = (
os.path.join(A__ , f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
__lowerCamelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="""optimizer""" , storage_reader=dist_cp.FileSystemReader(A__ ) , )
__lowerCamelCase = optim_state["""optimizer"""]
logger.info(f'Optimizer loaded from {ckpt_dir}' )
__lowerCamelCase = FSDP.optim_state_dict_to_load(A__ , A__ , A__ )
optimizer.load_state_dict(A__ )
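# Illustrative call order (not part of the original module): Accelerate's
# save_state/load_state drive these helpers pairwise, passing the FSDP plugin,
# the accelerator, the wrapped model/optimizer objects, a directory and an
# index, in that positional order.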
| 12 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def map( dataset , **kwargs ):  # intentionally shadows the builtin, as the keys below reuse the name
    _ = dataset.map(**kwargs )
@get_duration
def filter( dataset , **kwargs ):  # intentionally shadows the builtin
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string" ), "numbers": datasets.Value("float32" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , "dataset.arrow" ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=True )
        def tokenize( examples ):
            return tokenizer(examples["text"] )
        times["map identity"] = map(dataset )
        times["map identity batched"] = map(dataset , batched=True )
        times["map no-op batched"] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="numpy" ):
            times["map no-op batched numpy"] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="pandas" ):
            times["map no-op batched pandas"] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="torch" , columns="numbers" ):
            times["map no-op batched pytorch"] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="tensorflow" , columns="numbers" ):
            times["map no-op batched tensorflow"] = map(dataset , function=lambda x : None , batched=True )
        times["map fast-tokenizer batched"] = map(dataset , function=tokenize , batched=True )
        times["filter"] = filter(dataset )
        # Activate later when tokenizers support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times["map fast-tokenizer batched numpy"] = map(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH , "wb" ) as f:
            f.write(json.dumps(times ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
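# The `get_duration` decorator imported from `utils` above is not shown in this file.
# A plausible stand-in (the real helper may differ in details) is a simple wall-clock
# timer that returns the elapsed seconds of one call:
import functools
import time


def get_duration(func):
    # Run the wrapped call once and return the elapsed wall-clock time in seconds.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start

    return wrapper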
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ['image']
    batch_params = ['image']
    required_optional_params = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed) ).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent(self):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""" )
        pipe = ShapEImg2ImgPipeline.from_pretrained("""openai/shap-e-img2img""")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
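# Outside the test harness, the same pipeline can be used end to end. This sketch
# assumes the public `openai/shap-e-img2img` checkpoint and diffusers' `export_to_gif`
# helper; it is illustrative and not part of the test suite.
if __name__ == "__main__":
    import torch
    from diffusers.utils import export_to_gif

    pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img", torch_dtype=torch.float16).to("cuda")
    image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
    )
    # output_type defaults to "pil", so images[0] is a list of PIL turntable frames
    frames = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=64).images[0]
    export_to_gif(frames, "corgi_3d.gif")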
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, """weight"""):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, """weight"""):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value() , new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value() , new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1) , dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3) + 3, eos_column_vector] , axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("""facebook/opt-350m""")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape , expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4E-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4E-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = """facebook/opt-350m"""
    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            """Today is a beautiful day and I want to""",
            """In the city of""",
            """Paris is the capital of France and""",
            """Computers and mobile phones have taken""",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts , return_tensors="""tf""" , padding=True , add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ] )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1E-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1)
        self.assertTrue(np.allclose(logits , logits_meta , atol=1E-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
    def test_generation_pre_attn_layer_norm(self):
        model_id = """facebook/opt-125m"""
        EXPECTED_OUTPUTS = [
            """Today is a beautiful day and I want to""",
            """In the city of New York, the city""",
            """Paris is the capital of France and the capital""",
            """Computers and mobile phones have taken over the""",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="""tf""").input_ids
            generated_ids = model.generate(input_ids , max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = """facebook/opt-350m"""
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = """left"""
        # use different length sentences to test batching
        sentences = [
            """Hello, my dog is a little""",
            """Today, I""",
        ]
        inputs = tokenizer(sentences , return_tensors="""tf""" , padding=True)
        input_ids = inputs["""input_ids"""]
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs["""attention_mask"""])
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="""tf""").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["""attention_mask"""][-1] , tf.int64))
        inputs_padded = tokenizer(sentences[1] , return_tensors="""tf""").input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True)
        expected_output_sentence = [
            """Hello, my dog is a little bit of a dork.\nI'm a little bit""",
            """Today, I was in the middle of a conversation with a friend about the""",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence)
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = """facebook/opt-350m"""
        EXPECTED_OUTPUTS = [
            """Today is a beautiful day and I want to""",
            """In the city of San Francisco, the city""",
            """Paris is the capital of France and the capital""",
            """Computers and mobile phones have taken over the""",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="""tf""").input_ids
            generated_ids = model.generate(input_ids , max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS)
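# The XLA pattern exercised above generalizes to inference scripts. A minimal,
# illustrative sketch (small public checkpoint chosen for speed; not part of the tests):
if __name__ == "__main__":
    tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
    model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")

    # Compile generate once; later calls with same-shaped inputs reuse the XLA program.
    xla_generate = tf.function(model.generate, jit_compile=True)

    inputs = tokenizer("Today is a beautiful day and", return_tensors="tf")
    output_ids = xla_generate(**inputs, max_length=10)
    print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))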
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
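# A quick check of the rounding behaviour (illustrative; values follow directly from
# the definition above):
#
#     downscale_height_and_width(768, 768, scale_factor=8) == (96, 96)
#         # 768 is divisible by 8**2 = 64 -> 768 // 64 = 12 cells -> 12 * 8 = 96
#     downscale_height_and_width(700, 700, scale_factor=8) == (88, 88)
#         # 700 // 64 = 10 with a remainder, so it rounds up to 11 -> 11 * 8 = 88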
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(F'cuda:{gpu_id}' )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        device = torch.device(F'cuda:{gpu_id}' )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    @replace_example_docstring(UpperCAmelCase_)  # the example docstring constant defined above
    def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , hint: torch.FloatTensor , height: int = 512 , width: int = 512 , num_inference_steps: int = 100 , guidance_scale: float = 4.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds , dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint , dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=device)
            hint = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=device)
        self.scheduler.set_timesteps(num_inference_steps , device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds, """hint""": hint}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1)
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True)["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1)
            image = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys) , 1004 )
    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
    def test_rust_and_python_bpe_tokenizers(self):
        """simple docstring"""
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = """I was born in 92000, and this is falsé."""
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens , rust_tokens)
        ids = tokenizer.encode(sequence , add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False)
        self.assertListEqual(ids , rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids , rust_ids)
@slow
    def test_tokenizer_integration(self):
        """simple docstring"""
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=sequences , )
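# Illustrative quick-start for the tokenizer under test (assumes network access and
# the public `camembert-base` checkpoint; not part of the test suite):
if __name__ == "__main__":
    tok = CamembertTokenizer.from_pretrained("camembert-base")
    print(tok.tokenize("J'aime le camembert !"))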
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=2, seq_length=56, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=7, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=2, num_random_blocks=3):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """attention_mask""": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs)

                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs) , len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1E-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("""outputs.attentions""" ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes)
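# Illustrative usage of the Flax model exercised above (assumes the public
# `google/bigbird-roberta-base` checkpoint; not part of the test suite):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base")
    model = FlaxBigBirdModel.from_pretrained("google/bigbird-roberta-base")
    inputs = tokenizer("BigBird uses block sparse attention for long sequences.", return_tensors="np")
    outputs = model(**inputs)
    print(outputs.last_hidden_state.shape)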
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34 # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8 # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """simple docstring"""
    if (force, area, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if force < 0:
        raise ValueError('''Magnitude of force can not be negative''')
    if distance < 0:
        raise ValueError('''Distance can not be negative''')
    if area < 0:
        raise ValueError('''Area can not be negative''')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('''One and only one argument must be 0''')
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
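# Worked example (illustrative): two plates of area 4 cm^2 (4e-4 m^2) separated by
# 1 micrometre. Plugging into the force branch above gives
# (ħ * c * π² * A) / (240 * d⁴) ≈ 5.2e-7 N.
if __name__ == "__main__":
    print(casimir_force(force=0, area=4e-4, distance=1e-6))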
def odd_even_transposition(arr: list) -> list:
    '''simple docstring'''
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
UpperCAmelCase_ = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
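# For intuition, `_LazyModule` defers the heavy model imports until first attribute
# access. A minimal stand-in (illustrative only; the real implementation differs):
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map each exported attribute to the submodule that defines it
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             # import the submodule on demand, then resolve the attribute from it
#             module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#             return getattr(module, attr)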
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCAmelCase_ = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(save_directory , exist_ok=True)
        question_encoder_path = os.path.join(save_directory , """question_encoder_tokenizer""" )
        generator_path = os.path.join(save_directory , """generator_tokenizer""" )
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("""config""" , None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder="""generator_tokenizer""" )
        return cls(question_encoder=question_encoder , generator=generator)
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args , **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args , **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args , **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = None, truncation: bool = True, **kwargs) -> BatchEncoding:
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""" , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs["""labels"""] = labels["""input_ids"""]
        return model_inputs
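# Illustrative usage (assumes the public `facebook/rag-token-nq` checkpoint, which
# ships both sub-tokenizers in the subfolders `from_pretrained` expects):
#
#     from transformers import RagTokenizer
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who sings does he love me with reba", return_tensors="pt")
#     labels = tokenizer(text_target="reba mcentire", return_tensors="pt")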
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self, vocab_size=50_265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2_048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2_048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.0_2, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,forced_eos_token_id=forced_eos_token_id ,**kwargs ,)
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """simple docstring"""
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs ,direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self ,tokenizer: PreTrainedTokenizer ,batch_size: int = -1 ,seq_length: int = -1 ,is_pair: bool = False ,framework: Optional[TensorType] = None ,):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,decoder_seq_length ,is_pair ,framework )
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs ,**decoder_inputs )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ = common_inputs['input_ids'].shape
UpperCAmelCase__ = common_inputs['decoder_input_ids'].shape[1]
UpperCAmelCase__ , UpperCAmelCase__ = self.num_attention_heads
UpperCAmelCase__ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ = decoder_seq_length + 3
UpperCAmelCase__ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase__ = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase_ ,UpperCamelCase_ )] ,dim=1 )
UpperCAmelCase__ = []
                # If both the encoder and decoder layer counts are present in the model configuration, both are considered
UpperCAmelCase__ , UpperCAmelCase__ = self.num_layers
UpperCAmelCase__ = min(UpperCamelCase_ ,UpperCamelCase_ )
UpperCAmelCase__ = max(UpperCamelCase_ ,UpperCamelCase_ ) - min_num_layers
UpperCAmelCase__ = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
) )
# TODO: test this.
UpperCAmelCase__ = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase_ ,UpperCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) )
return common_inputs
def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : PreTrainedTokenizer ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[TensorType] = None ,):
UpperCAmelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
UpperCAmelCase__ = seqlen + 2
UpperCAmelCase__ , UpperCAmelCase__ = self.num_layers
UpperCAmelCase__ , UpperCAmelCase__ = self.num_attention_heads
UpperCAmelCase__ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase__ = common_inputs['attention_mask'].dtype
UpperCAmelCase__ = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase_ ,UpperCamelCase_ ,dtype=UpperCamelCase_ )] ,dim=1 )
UpperCAmelCase__ = [
(torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(UpperCamelCase_ )
]
return common_inputs
def __lowerCAmelCase ( self : Any ,lowerCamelCase__ : PreTrainedTokenizer ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If the axis is dynamic (-1), we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase__ = compute_effective_axis_dimension(
UpperCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
        # If the axis is dynamic (-1), we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase__ = tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
UpperCAmelCase__ = compute_effective_axis_dimension(
UpperCamelCase_ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=UpperCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase__ = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase__ = dict(tokenizer(UpperCamelCase_ ,return_tensors=UpperCamelCase_ ) )
return common_inputs
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : PreTrainedTokenizer ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : int = -1 ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[TensorType] = None ,):
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase_ ,batch_size=UpperCamelCase_ ,seq_length=UpperCamelCase_ ,is_pair=UpperCamelCase_ ,framework=UpperCamelCase_ )
elif self.task == "causal-lm":
UpperCAmelCase__ = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase_ ,batch_size=UpperCamelCase_ ,seq_length=UpperCamelCase_ ,is_pair=UpperCamelCase_ ,framework=UpperCamelCase_ )
else:
UpperCAmelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ ,batch_size=UpperCamelCase_ ,seq_length=UpperCamelCase_ ,is_pair=UpperCamelCase_ ,framework=UpperCamelCase_ )
return common_inputs
def __lowerCAmelCase ( self : Dict ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str] ):
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase__ = super()._flatten_past_key_values_(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
else:
UpperCAmelCase__ = super(UpperCamelCase_ ,self )._flatten_past_key_values_(
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ )
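# A hedged usage sketch (not part of the file above): the ONNX config class
# defined here mirrors transformers' seq2seq OnnxConfig machinery, e.g.
# BartOnnxConfig. Assuming that correspondence holds, dummy export inputs
# could be produced like this:
#
#     from transformers import AutoTokenizer, BartConfig, TensorType
#     from transformers.models.bart.configuration_bart import BartOnnxConfig
#     onnx_config = BartOnnxConfig(BartConfig(), task="default")
#     tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     # keys: input_ids, attention_mask, decoder_input_ids, decoder_attention_mask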
| 98 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCAmelCase_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
UpperCAmelCase_ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
UpperCAmelCase_ = 0
for log in Path().glob('*.log'):
UpperCAmelCase_ = 0
with open(log, 'r') as f:
for line in f:
UpperCAmelCase_ = json.loads(line)
if line.get('nodeid', '') != "":
UpperCAmelCase_ = line['nodeid']
if line.get('duration', None) is not None:
UpperCAmelCase_ = f"""{line["duration"]:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCAmelCase_ = []
log.unlink()
UpperCAmelCase_ = ''
UpperCAmelCase_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
for test in failed_tests:
UpperCAmelCase_ = test[0].split('::')
UpperCAmelCase_ = data[0].split('/')[-1]
if data[0] not in filesafailed:
UpperCAmelCase_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCAmelCase_ = [test[0] for test in failed_table]
UpperCAmelCase_ = list(set(files))
# Count number of instances in failed_tests
UpperCAmelCase_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCAmelCase_ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
UpperCAmelCase_ = 'Too many failed tests, please see the full report in the Action results.'
UpperCAmelCase_ = len(err) + 10
UpperCAmelCase_ = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
UpperCAmelCase_ = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
UpperCAmelCase_ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCAmelCase_ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCAmelCase_ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
UpperCAmelCase_ = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase_ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase_ = row[0]
else:
UpperCAmelCase_ = ''
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
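# In brief, the slack_sdk threading pattern used above (channel name is
# illustrative): post one parent message, then attach per-file failure tables
# as replies via the parent's `ts` timestamp.
#
#     resp = client.chat_postMessage(channel="#my-ci-channel", text=message, blocks=payload)
#     client.chat_postMessage(channel="#my-ci-channel", thread_ts=resp.data["ts"], blocks=[payload])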
| 12 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
@property
def __a ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Tuple = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
snake_case__ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(UpperCamelCase_ )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : int = self.dummy_uncond_unet
snake_case__ : Optional[int] = DDIMScheduler()
snake_case__ : Any = self.dummy_vq_model
snake_case__ : Optional[Any] = LDMPipeline(unet=UpperCamelCase_ , vqvae=UpperCamelCase_ , scheduler=UpperCamelCase_ )
ldm.to(UpperCamelCase_ )
ldm.set_progress_bar_config(disable=UpperCamelCase_ )
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : List[str] = ldm(generator=UpperCamelCase_ , num_inference_steps=2 , output_type='numpy' ).images
snake_case__ : Optional[int] = torch.manual_seed(0 )
snake_case__ : Tuple = ldm(generator=UpperCamelCase_ , num_inference_steps=2 , output_type='numpy' , return_dict=UpperCamelCase_ )[0]
snake_case__ : Tuple = image[0, -3:, -3:, -1]
snake_case__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case__ : str = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
snake_case__ : Optional[Any] = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : str = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(UpperCamelCase_ )
ldm.set_progress_bar_config(disable=UpperCamelCase_ )
snake_case__ : Dict = torch.manual_seed(0 )
snake_case__ : Any = ldm(generator=UpperCamelCase_ , num_inference_steps=5 , output_type='numpy' ).images
snake_case__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case__ : Union[str, Any] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
snake_case__ : Optional[int] = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
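# For reference, a minimal unconditional sampling sketch with the pipeline
# exercised above (the checkpoint id comes from the slow test; running this
# downloads the weights):
#
#     from diffusers import LDMPipeline
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = pipe(num_inference_steps=5, output_type="numpy").images[0]  # shape (256, 256, 3)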
| 143 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
@register_to_config
def __init__( self: Optional[Any] , UpperCamelCase_: bool , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None ):
super().__init__()
__lowerCamelCase = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase = torch.zeros(UpperCamelCase_ , UpperCamelCase_ )
else:
__lowerCamelCase = None
__lowerCamelCase = torch.nn.Parameter(UpperCamelCase_ )
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : VQModel
UpperCAmelCase__ : CLIPTextModel
UpperCAmelCase__ : CLIPTokenizer
UpperCAmelCase__ : TransformeraDModel
UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__ : VQDiffusionScheduler
def __init__( self: str , UpperCamelCase_: VQModel , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: TransformeraDModel , UpperCamelCase_: VQDiffusionScheduler , UpperCamelCase_: LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=UpperCamelCase_ , transformer=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
        # See where CLIP normalizes the pooled output:
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 )
else:
__lowerCamelCase = [""""""] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: Tuple , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 5.0 , UpperCamelCase_: float = 1.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = len(UpperCamelCase_ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase_ )}.' )
        # get the initial, completely masked latents unless the user supplied them
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ )
__lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ )
__lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ):
__lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ )
__lowerCamelCase = torch.exp(UpperCamelCase_ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
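# A standalone sketch of the log-space classifier-free guidance used in
# `__call__` above: mix the unconditional and conditional log-probabilities,
# then renormalize with logsumexp so each latent position's distribution over
# codebook entries sums to one.
def _guided_log_probs_sketch(log_p_uncond: torch.FloatTensor, log_p_text: torch.FloatTensor, guidance_scale: float) -> torch.FloatTensor:
    mixed = log_p_uncond + guidance_scale * (log_p_text - log_p_uncond)
    return mixed - torch.logsumexp(mixed, dim=1, keepdim=True)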
| 12 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"}
a__ : List[Any] = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
a__ : Any = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
a__ : List[Any] = "▁"
class UpperCamelCase__ ( __lowerCamelCase):
UpperCAmelCase__ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self :List[Any] , _A :Union[str, Any] , _A :Dict="<s>" , _A :str="</s>" , _A :Tuple="</s>" , _A :Dict="<s>" , _A :int="<unk>" , _A :List[Any]="<pad>" , _A :Union[str, Any]="<mask>" , _A :Optional[Dict[str, Any]] = None , **_A :List[Any] , ) -> Dict:
'''simple docstring'''
__A = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__A = vocab_file
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
__A = len(self.sp_model ) - 1
__A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowercase_ ( self :Dict , _A :List[int] , _A :Optional[List[int]] = None ) -> Tuple:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A = [self.cls_token_id]
__A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self :Dict , _A :List[int] , _A :Optional[List[int]] = None , _A :bool = False ) -> Optional[Any]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowercase_ ( self :int , _A :List[int] , _A :Optional[List[int]] = None ) -> str:
'''simple docstring'''
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self :str ) -> List[str]:
'''simple docstring'''
return len(self.sp_model )
def lowercase_ ( self :Optional[int] ) -> Any:
'''simple docstring'''
__A = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self :Dict , _A :str ) -> int:
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowercase_ ( self :int , _A :Optional[int] ) -> Dict:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__A = self.sp_model.PieceToId(UpperCamelCase_ )
return spm_id if spm_id else self.unk_token_id
def lowercase_ ( self :str , _A :int ) -> Optional[Any]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(UpperCamelCase_ )
def lowercase_ ( self :Dict , _A :int ) -> str:
'''simple docstring'''
__A = []
__A = ''
__A = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
__A = True
__A = []
else:
current_sub_tokens.append(UpperCamelCase_ )
__A = False
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string.strip()
def __getstate__( self :str ) -> int:
'''simple docstring'''
__A = self.__dict__.copy()
__A = None
return state
def __setstate__( self :Optional[int] , _A :List[Any] ) -> List[Any]:
'''simple docstring'''
__A = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self :Tuple , _A :str , _A :Optional[str] = None ) -> Any:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__A = os.path.join(
UpperCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , 'wb' ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
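# A hedged usage sketch: the tokenizer above mirrors transformers'
# BarthezTokenizer (the model ids come from PRETRAINED_VOCAB_FILES_MAP above).
#
#     from transformers import BarthezTokenizer
#     tok = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     ids = tok("Un exemple de phrase.")["input_ids"]  # <s> ... </s> layout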
| 161 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = DistilBertTokenizer
UpperCAmelCase__ : Dict = DistilBertTokenizerFast
UpperCAmelCase__ : Tuple = True
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
__lowerCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
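# A quick standalone check of the layout asserted above (downloads the
# tokenizer; token ids are whatever the vocab assigns):
#
#     tok = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
#     assert tok.build_inputs_with_special_tokens([7]) == [tok.cls_token_id, 7, tok.sep_token_id]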
| 12 | 0 |
from ...configuration_utils import PretrainedConfig
_snake_case : Dict = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class _UpperCAmelCase ( __lowerCamelCase ):
"""simple docstring"""
a_ = 'tapas'
def __init__( self : Tuple , lowerCAmelCase_ : List[Any]=3_0_5_2_2 , lowerCAmelCase_ : Tuple=7_6_8 , lowerCAmelCase_ : Union[str, Any]=1_2 , lowerCAmelCase_ : str=1_2 , lowerCAmelCase_ : List[Any]=3_0_7_2 , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Dict=1_0_2_4 , lowerCAmelCase_ : int=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0] , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : Optional[Any]=1e-12 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : Optional[Any]=10.0 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : Any=1.0 , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : Optional[int]=1.0 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Any="ratio" , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Optional[int]=6_4 , lowerCAmelCase_ : Optional[Any]=3_2 , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Union[str, Any] , ) -> Any:
super().__init__(pad_token_id=UpperCamelCase_ , **UpperCamelCase_ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_sizes
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
# Fine-tuning task hyperparameters
__lowerCAmelCase = positive_label_weight
__lowerCAmelCase = num_aggregation_labels
__lowerCAmelCase = aggregation_loss_weight
__lowerCAmelCase = use_answer_as_supervision
__lowerCAmelCase = answer_loss_importance
__lowerCAmelCase = use_normalized_answer_loss
__lowerCAmelCase = huber_loss_delta
__lowerCAmelCase = temperature
__lowerCAmelCase = aggregation_temperature
__lowerCAmelCase = use_gumbel_for_cells
__lowerCAmelCase = use_gumbel_for_aggregation
__lowerCAmelCase = average_approximation_function
__lowerCAmelCase = cell_selection_preference
__lowerCAmelCase = answer_loss_cutoff
__lowerCAmelCase = max_num_rows
__lowerCAmelCase = max_num_columns
__lowerCAmelCase = average_logits_per_cell
__lowerCAmelCase = select_one_column
__lowerCAmelCase = allow_empty_column_selection
__lowerCAmelCase = init_cell_selection_weights_to_zero
__lowerCAmelCase = reset_position_index_per_cell
__lowerCAmelCase = disable_per_token_loss
# Aggregation hyperparameters
__lowerCAmelCase = aggregation_labels
__lowerCAmelCase = no_aggregation_label_index
if isinstance(self.aggregation_labels , UpperCamelCase_ ):
__lowerCAmelCase = {int(UpperCamelCase_ ): v for k, v in aggregation_labels.items()}
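# A minimal usage sketch, assuming the config above corresponds to
# transformers' TapasConfig (the hyperparameter names match):
#
#     from transformers import TapasConfig, TapasForQuestionAnswering
#     config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#     model = TapasForQuestionAnswering(config)  # randomly initialized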
| 284 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(A__ )
__lowerCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
    # We also rename the 'label' column to 'labels', which is the name expected for labels by models in the
    # transformers library
__lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
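# Note on the collate strategy above: TPUs compile one graph per input shape,
# so padding every batch to a fixed max_length avoids costly recompilation;
# on GPU/CPU, padding to the longest sequence in each batch is cheaper.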
def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any] , A__ : Tuple , A__ : Optional[Any] ):
'''simple docstring'''
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
__lowerCamelCase, __lowerCamelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
return eval_metric["accuracy"]
def lowerCamelCase__ ( A__ : Optional[Any] , A__ : Optional[int] ):
'''simple docstring'''
__lowerCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase = config["""lr"""]
__lowerCamelCase = int(config["""num_epochs"""] )
__lowerCamelCase = int(config["""seed"""] )
__lowerCamelCase = int(config["""batch_size"""] )
__lowerCamelCase = args.model_name_or_path
set_seed(A__ )
__lowerCamelCase, __lowerCamelCase = get_dataloaders(A__ , A__ , A__ )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
__lowerCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCamelCase = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCamelCase = 1
__lowerCamelCase = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCamelCase = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
__lowerCamelCase = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
__lowerCamelCase = 0
    # We also need to keep track of the starting epoch so files are named properly
__lowerCamelCase = 0
__lowerCamelCase = evaluate.load("""glue""" , """mrpc""" )
__lowerCamelCase = num_epochs
if args.partial_train_epoch is not None:
__lowerCamelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__lowerCamelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
__lowerCamelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__lowerCamelCase = int(A__ ) + 1
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
accelerator.print("""resumed checkpoint performance:""" , A__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f:
__lowerCamelCase = json.load(A__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__lowerCamelCase = {}
for epoch in range(A__ , A__ ):
model.train()
for step, batch in enumerate(A__ ):
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.loss
__lowerCamelCase = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__lowerCamelCase = f'epoch_{epoch}'
__lowerCamelCase = os.path.join(args.output_dir , A__ )
accelerator.save_state(A__ )
__lowerCamelCase = evaluation_loop(A__ , A__ , A__ , A__ )
__lowerCamelCase = accuracy
__lowerCamelCase = lr_scheduler.get_lr()[0]
__lowerCamelCase = optimizer.param_groups[0]["""lr"""]
__lowerCamelCase = epoch
__lowerCamelCase = overall_step
accelerator.print(f'epoch {epoch}:' , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f:
json.dump(A__ , A__ )
def lowerCamelCase__ ( ):
'''simple docstring'''
    __lowerCamelCase = argparse.ArgumentParser(description="""Simple example of a training script with checkpointing and resuming.""" )
parser.add_argument(
"""--model_name_or_path""" , type=A__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=A__ , )
parser.add_argument(
"""--output_dir""" , type=A__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=A__ , default=A__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=A__ , default=A__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=A__ , default=2 , help="""Number of train epochs.""" , )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
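# Hypothetical invocations of this script (paths are illustrative):
#
#     accelerate launch this_script.py --num_epochs 2 --output_dir ./ckpts
#     accelerate launch this_script.py --output_dir ./ckpts \
#         --resume_from_checkpoint ./ckpts/epoch_0 --partial_train_epoch 1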
| 12 | 0 |
from string import ascii_uppercase
lowerCAmelCase : Union[str, Any] = {str(ord(c) - 55): c for c in ascii_uppercase}
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
if isinstance(A__ , A__ ):
raise TypeError("int() can't convert non-string with explicit base" )
if num < 0:
raise ValueError("parameter must be positive int" )
if isinstance(A__ , A__ ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if isinstance(A__ , A__ ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if base in (0, 1):
raise ValueError("base must be >= 2" )
if base > 36:
raise ValueError("base must be <= 36" )
SCREAMING_SNAKE_CASE_: int = ""
SCREAMING_SNAKE_CASE_: int = 0
SCREAMING_SNAKE_CASE_: int = 0
while div != 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = divmod(A__ , A__ )
if base >= 11 and 9 < mod < 36:
SCREAMING_SNAKE_CASE_: str = ALPHABET_VALUES[str(A__ )]
else:
SCREAMING_SNAKE_CASE_: Tuple = str(A__ )
new_value += actual_value
SCREAMING_SNAKE_CASE_: Optional[Any] = num // base
SCREAMING_SNAKE_CASE_: str = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(A__ )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
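# A compact reference sketch of the same conversion, assuming the standard
# digit alphabet 0-9 followed by A-Z (this matches ALPHABET_VALUES above):
def _to_base_sketch(num: int, base: int) -> str:
    digits = "0123456789" + ascii_uppercase
    if num == 0:
        return "0"
    out = []
    while num > 0:
        num, rem = divmod(num, base)
        out.append(digits[rem])
    return "".join(reversed(out))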
| 13 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase_ = get_tests_dir('fixtures')
UpperCAmelCase_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
UpperCAmelCase_ = get_tests_dir('fixtures/dummy-config.json')
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = 0
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load the feature extractor locally
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
__lowerCamelCase = WavaVecaFeatureExtractor(**UpperCamelCase_ )
# save in new folder
model_config.save_pretrained(UpperCamelCase_ )
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
# make sure private variable is not incorrectly saved
__lowerCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
with self.assertRaisesRegex(
UpperCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCAmelCase__ ( self: Tuple ):
with self.assertRaisesRegex(
UpperCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , revision="""aaaaaa""" )
def lowerCAmelCase__ ( self: Optional[Any] ):
with self.assertRaisesRegex(
UpperCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase__ ( self: Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def lowerCAmelCase__ ( self: Any ):
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCamelCase = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase__ ( self: Dict ):
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : str = True
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# If remote code is not set, the default is to use local
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(UpperCamelCase_ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
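# The registration pattern exercised above, in brief (names are illustrative):
#
#     AutoConfig.register("my-model", MyConfig)
#     AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
#     fe = AutoFeatureExtractor.from_pretrained(saved_dir)  # resolves via MyConfig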
| 12 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__A = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
__A = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
__A = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n"
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
return float((preds == labels).mean() )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
lowercase__: str = simple_accuracy(A__ , A__ )
lowercase__: List[Any] = float(fa_score(y_true=A__ , y_pred=A__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
lowercase__: int = float(pearsonr(A__ , A__ )[0] )
lowercase__: Union[str, Any] = float(spearmanr(A__ , A__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase (datasets.Metric ):
"""simple docstring"""
def _snake_case ( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", '''
'''\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ )}
elif self.config_name == "stsb":
return pearson_and_spearman(UpperCamelCase_ , UpperCamelCase_ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(UpperCamelCase_ , UpperCamelCase_ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", '''
'''\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]''' )
| 177 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
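

# Usage sketch (dataset name and URL are illustrative): in dataset tests this
# manager stands in for the real DownloadManager and resolves every URL to a
# file shipped inside the dataset's dummy_data.zip.
#
#   dl_manager = MockDownloadManager("squad", config=None, version="1.0.0", use_local_dummy_data=True)
#   local_file = dl_manager.download_and_extract("https://example.com/train-v1.1.json")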
| 12 | 0 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
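

# Example usage sketch (the file name and schema below are illustrative, not
# part of the benchmark suite): write ten random rows to disk and reload them.
if __name__ == "__main__":
    example_features = datasets.Features(
        {"text": datasets.Value("string"), "score": datasets.Value("float32")}
    )
    ds = generate_example_dataset("tmp.arrow", example_features, num_examples=10)
    print(ds)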
| 298 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, pruning column and diagonal collisions."""
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Solve the n-queens problem and print every board that was found."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
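    # Sanity check (known results): the 4-queens puzzle has exactly 2 solutions
    # and the 8-queens puzzle has 92.
    sanity_boards: list[list[str]] = []
    depth_first_search([], [], [], sanity_boards, 4)
    assert len(sanity_boards) == 2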
| 12 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""distilbert-base-uncased""": 5_12,
"""distilbert-base-uncased-distilled-squad""": 5_12,
"""distilbert-base-cased""": 5_12,
"""distilbert-base-cased-distilled-squad""": 5_12,
"""distilbert-base-german-cased""": 5_12,
"""distilbert-base-multilingual-cased""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
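

# Illustration of the special-token layout produced by the two methods above
# (token ids depend on the vocabulary; the letters are placeholders):
#   single sequence:   [CLS] A [SEP]            token_type_ids: 0 ... 0
#   pair of sequences: [CLS] A [SEP] B [SEP]    token_type_ids: 0 ... 0 1 ... 1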
| 309 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed to give every node exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
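    # Worked example: the root holds 3 coins and both leaves hold 0, so one
    # coin moves to each child, 2 moves in total.
    example_tree = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_tree) == 2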
| 12 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def _lowercase ( self : str ) -> Any:
"""simple docstring"""
return 32
@property
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
return 32
@property
def _lowercase ( self : int ) -> Any:
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase ( self : int ) -> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase ( self : Tuple ) -> Dict:
"""simple docstring"""
return 100
@property
def _lowercase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__magic_name__ = UNetaDConditionModel(**UpperCamelCase_ )
return model
@property
def _lowercase ( self : Any ) -> List[str]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowercase ( self : int ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = self.dummy_unet
__magic_name__ = self.dummy_movq
__magic_name__ = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__magic_name__ = DDIMScheduler(**UpperCamelCase_ )
__magic_name__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _lowercase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int]=0 ) -> int:
"""simple docstring"""
__magic_name__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__magic_name__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase_ )
# create init_image
__magic_name__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((256, 256) )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__magic_name__ = torch.manual_seed(UpperCamelCase_ )
else:
__magic_name__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__magic_name__ = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def _lowercase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = """cpu"""
__magic_name__ = self.get_dummy_components()
__magic_name__ = self.pipeline_class(**UpperCamelCase_ )
__magic_name__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__magic_name__ = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
__magic_name__ = output.images
__magic_name__ = pipe(
**self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0]
__magic_name__ = image[0, -3:, -3:, -1]
__magic_name__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
def _lowercase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__magic_name__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
__magic_name__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__magic_name__ = """A red cartoon frog, 4k"""
__magic_name__ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase_ )
__magic_name__ = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
__magic_name__ = pipeline.to(UpperCamelCase_ )
pipeline.set_progress_bar_config(disable=UpperCamelCase_ )
__magic_name__ = torch.Generator(device="""cpu""" ).manual_seed(0 )
__magic_name__ , __magic_name__ = pipe_prior(
UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__magic_name__ = pipeline(
image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
__magic_name__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 88 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        # Pad the bottom and right edges so height and width become multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
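

# Padding arithmetic illustration (a sketch; the zero array stands in for a
# real channels-first image): with pad_size=8 a 30x30 input grows to 32x32,
# the next multiples of 8, via symmetric padding of the bottom/right edges.
if __name__ == "__main__":
    processor = Swin2SRImageProcessor(pad_size=8)
    dummy_image = np.zeros((3, 30, 30), dtype=np.float32)
    print(processor.pad(dummy_image, size=8).shape)  # -> (3, 32, 32)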
| 12 | 0 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}

        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
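

# Composition sketch (the model types below are illustrative): the config
# pairs any vision encoder with any text decoder.
#
#   encoder_cfg = AutoConfig.for_model("vit")
#   decoder_cfg = AutoConfig.for_model("gpt2")
#   cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)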
| 211 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the maximum of nums[left:right + 1] using divide and conquer."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
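    # Quick usage example over the full range of a small list.
    sample = [3, 7, 1, 9, 4]
    assert find_max(sample, 0, len(sample) - 1) == 9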
| 12 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Greedy fractional knapsack: take items by value/weight ratio, splitting the last one."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
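    # Worked example: values [60, 100, 120], weights [10, 20, 30], capacity 50.
    # The first two items fit whole and 20/30 of the third is added:
    # 60 + 100 + 120 * 20 / 30 = 240.
    assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0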
| 163 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 12 | 0 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
"""simple docstring"""
def __init__( self : str ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[Any]=99 ,lowerCamelCase__ : List[str]=13 ,lowerCamelCase__ : int=16 ,lowerCamelCase__ : Tuple=7 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Optional[int]=True ,lowerCamelCase__ : Dict=False ,lowerCamelCase__ : str=True ,lowerCamelCase__ : Union[str, Any]=2 ,lowerCamelCase__ : Union[str, Any]=32 ,lowerCamelCase__ : Any=4 ,lowerCamelCase__ : Dict=4 ,lowerCamelCase__ : str=30 ,lowerCamelCase__ : List[str]=0 ,lowerCamelCase__ : Dict=1 ,lowerCamelCase__ : Optional[Any]=2 ,lowerCamelCase__ : Dict=None ,):
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = decoder_seq_length
# For common tests
UpperCAmelCase__ = self.decoder_seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_attention_mask
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = d_model
UpperCAmelCase__ = d_model
UpperCAmelCase__ = decoder_layers
UpperCAmelCase__ = decoder_layers
UpperCAmelCase__ = decoder_ffn_dim
UpperCAmelCase__ = decoder_attention_heads
UpperCAmelCase__ = decoder_attention_heads
UpperCAmelCase__ = eos_token_id
UpperCAmelCase__ = bos_token_id
UpperCAmelCase__ = pad_token_id
UpperCAmelCase__ = decoder_start_token_id
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = None
UpperCAmelCase__ = decoder_seq_length
UpperCAmelCase__ = 2
UpperCAmelCase__ = 1
def __lowerCAmelCase ( self : int ):
UpperCAmelCase__ = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
UpperCAmelCase__ = None
if self.use_attention_mask:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.decoder_seq_length] ,vocab_size=2 )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size )
UpperCAmelCase__ = TrOCRConfig(
vocab_size=self.vocab_size ,d_model=self.d_model ,decoder_layers=self.decoder_layers ,decoder_ffn_dim=self.decoder_ffn_dim ,decoder_attention_heads=self.decoder_attention_heads ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,use_cache=self.use_cache ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,max_position_embeddings=self.max_position_embeddings ,)
return (config, input_ids, attention_mask, lm_labels)
def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict ,):
UpperCAmelCase__ = True
UpperCAmelCase__ = TrOCRDecoder(config=UpperCamelCase_ ).to(UpperCamelCase_ ).eval()
UpperCAmelCase__ = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
UpperCAmelCase__ = model(UpperCamelCase_ ,use_cache=UpperCamelCase_ )
UpperCAmelCase__ = model(UpperCamelCase_ )
UpperCAmelCase__ = model(UpperCamelCase_ ,use_cache=UpperCamelCase_ )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) )
self.parent.assertTrue(len(UpperCamelCase_ ) == len(UpperCamelCase_ ) + 1 )
UpperCAmelCase__ = outputs['past_key_values']
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase__ = ids_tensor((2, 1) ,config.vocab_size - 1 ) + 1
# append to next input_ids and
UpperCAmelCase__ = torch.cat([input_ids, next_tokens] ,dim=-1 )
UpperCAmelCase__ = model(UpperCamelCase_ )['last_hidden_state']
UpperCAmelCase__ = model(UpperCamelCase_ ,past_key_values=UpperCamelCase_ )['last_hidden_state']
# select random slice
UpperCAmelCase__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
UpperCAmelCase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
UpperCAmelCase__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(UpperCamelCase_ ,UpperCamelCase_ ,atol=1e-3 )
def __lowerCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
def __lowerCAmelCase ( self : str ):
UpperCAmelCase__ = TrOCRStandaloneDecoderModelTester(self ,is_training=UpperCamelCase_ )
UpperCAmelCase__ = ConfigTester(self ,config_class=UpperCamelCase_ )
def __lowerCAmelCase ( self : str ):
pass
def __lowerCAmelCase ( self : List[str] ):
pass
def __lowerCAmelCase ( self : Union[str, Any] ):
pass
def __lowerCAmelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase_ )
def __lowerCAmelCase ( self : str ):
return
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def __lowerCAmelCase ( self : Union[str, Any] ):
pass
| 98 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Apply one step of Conway's Game of Life to the given grid."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generate one greyscale PIL image per generation, starting from `cells`."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
UpperCAmelCase_ = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
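    # Sanity check: the blinker oscillates with period 2, so two updates
    # return the original pattern.
    assert new_generation(new_generation(BLINKER)) == BLINKER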
| 12 | 0 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_dedup, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_dedup), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 143 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def lowerCAmelCase__ ( self: str ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
__lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInpaintPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: int ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self: int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 12 | 0 |
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
def lowercase_ ( self :str ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
def lowercase_ ( self :Optional[int] , **_A :List[str] ) -> Tuple:
'''simple docstring'''
return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **UpperCamelCase_ )
def lowercase_ ( self :Dict , **_A :str ) -> int:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **UpperCamelCase_ )
def lowercase_ ( self :List[Any] ) -> List[Any]:
'''simple docstring'''
__A = '永和服装饰品有限公司,今天天气非常好'
__A = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
return input_text, output_text
def lowercase_ ( self :Optional[Any] ) -> str:
'''simple docstring'''
__A = self.get_tokenizer()
__A , __A = self.get_chinese_input_output_texts()
__A = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , output_text.split() )
__A = tokens + [tokenizer.unk_token]
__A = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
def lowercase_ ( self :Optional[Any] ) -> str:
'''simple docstring'''
__A = self.get_rust_tokenizer()
__A , __A = self.get_chinese_input_output_texts()
__A = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , output_text.split() )
__A = tokens + [tokenizer.unk_token]
__A = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
def lowercase_ ( self :Any ) -> Optional[Any]:
'''simple docstring'''
pass
def lowercase_ ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
def lowercase_ ( self :Any ) -> Optional[int]:
'''simple docstring'''
pass
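

# A minimal usage sketch outside the test harness (assumes network access to the
# "junnyu/roformer_chinese_base" checkpoint exercised above):
#
#   from transformers import RoFormerTokenizer
#   tok = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
#   tok.tokenize("永和服装饰品有限公司,今天天气非常好")
#   # -> ['永和', '服装', '饰品', '有限公司', ',', '今', '天', '天', '气', '非常', '好']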
| 161 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
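
# Illustration of the lazy-import pattern above: the heavy tokenizer module is only
# imported when the attribute is first touched, and when sentencepiece is missing the
# name is simply absent from `_import_structure` (the dummy-object machinery elsewhere
# in the library then supplies a placeholder that raises a helpful ImportError).
#
#   from transformers import MLukeTokenizer  # resolves through the lazy module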
| 12 | 0 |
import os
import sys
_snake_case : Any = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
_snake_case : Union[str, Any] = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
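
# Hedged usage sketch: these wrappers are torch.hub entry points. The hub repo name
# below follows the project's documented usage and is an assumption here:
#
#   import torch
#   tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")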
| 284 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        # Default to CPU execution when no onnxruntime provider is specified.
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
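
# Hedged usage sketch (the local path and input name are illustrative assumptions):
#
#   sess = OnnxRuntimeModel.from_pretrained("./onnx_model", file_name=ONNX_WEIGHTS_NAME)
#   out = sess(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))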
| 12 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of a training script with checkpointing.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
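
# Example invocations (hedged; the script file name is illustrative). After selecting
# a DeepSpeed config via `accelerate config`, the checkpoint/resume path above can be
# exercised like so:
#
#   accelerate launch test_checkpointing.py --output_dir ./ckpts --num_epochs 2
#   accelerate launch test_checkpointing.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0 --partial_train_epoch 1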
| 13 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
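
# Illustration: with the lazy structure above, importing the config classes is always
# cheap, while each modeling class only pulls in its framework on first access:
#
#   from transformers import VisionEncoderDecoderConfig   # no torch/TF/Flax needed
#   from transformers import VisionEncoderDecoderModel    # imports torch lazily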
| 12 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :List[Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :str = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :List[Any] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :str = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
class UpperCAmelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ['sentencepiece']
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''sentencepiece'''] )
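
# A minimal standalone sketch of the DummyObject/requires_backends pattern used above:
# instantiating any guarded class without sentencepiece installed fails fast with an
# actionable ImportError instead of an obscure failure at call time.
#
#   class _NeedsSentencePiece(metaclass=DummyObject):
#       _backends = ["sentencepiece"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["sentencepiece"])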
| 177 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # The check below should work but currently isn't (mostly a PyTorch issue);
            # in the meantime we disable it at the cost of extra memory usage.
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
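
# Hedged usage note: in an `accelerate` training script these helpers are normally not
# called directly; `Accelerator.save_state("ckpt/")` and `Accelerator.load_state("ckpt/")`
# dispatch to save_fsdp_model/save_fsdp_optimizer and their load counterparts based on
# `fsdp_plugin.state_dict_type`.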
| 12 | 0 |
"""
Dual numbers for forward-mode automatic differentiation:
https://en.wikipedia.org/wiki/Dual_number
"""
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the `order`-th derivative of `func` evaluated at `position`."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
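    # Worked check for the example above: f(y) = y**6, so f''(y) = 30 * y**4 and
    # f''(9) = 30 * 9**4 = 30 * 6561 = 196830, which is the value printed by
    # differentiate(f, 9, 2).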
| 298 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
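
# Hedged note: the integration test above downloads the "openai/shap-e-img2img"
# checkpoint and requires a CUDA GPU; the fast tests run on CPU against the tiny
# dummy components constructed in the first class.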
| 12 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
__lowerCAmelCase : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be trained."""} )
__lowerCAmelCase : Optional[str] = field(
default="""./""" , metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
__lowerCAmelCase : Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path of training dataset."""} )
__lowerCAmelCase : Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__lowerCAmelCase : Optional[int] = field(default=2 , metadata={"""help""": """Batch size for training."""} )
__lowerCAmelCase : Optional[int] = field(default=2 , metadata={"""help""": """Batch size for evaluation."""} )
__lowerCAmelCase : Optional[float] = field(default=0.1 , metadata={"""help""": """Value of weight decay."""} )
__lowerCAmelCase : Optional[int] = field(
default=1_0_0_0_0 , metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    __lowerCAmelCase : Optional[float] = field(default=2E-4 , metadata={"""help""": """Learning rate for training."""} )
    __lowerCAmelCase : Optional[str] = field(default="""cosine""" , metadata={"""help""": """Learning rate scheduler type."""} )
__lowerCAmelCase : Optional[int] = field(
default=7_5_0 , metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
__lowerCAmelCase : Optional[int] = field(
default=1_6 , metadata={"""help""": """Number of gradient accumulation steps."""} )
__lowerCAmelCase : Optional[bool] = field(
default=__lowerCamelCase , metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
__lowerCAmelCase : Optional[int] = field(default=5_0_0_0_0 , metadata={"""help""": """Maximum number of training steps."""} )
__lowerCAmelCase : Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__lowerCAmelCase : Optional[int] = field(default=1_0_2_4 , metadata={"""help""": """Sequence lengths used for training."""} )
__lowerCAmelCase : Optional[int] = field(default=1 , metadata={"""help""": """Training seed."""} )
__lowerCAmelCase : Optional[int] = field(
default=1_0_2_4 , metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} , )
__lowerCAmelCase : Optional[str] = field(
default=__lowerCamelCase , metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
__lowerCAmelCase : Optional[bool] = field(default=__lowerCamelCase , metadata={"""help""": """If True the data is pretokenized."""} )
@dataclass
class EvaluationArguments:
__lowerCAmelCase : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__lowerCAmelCase : Optional[str] = field(
default="""codeparrot/codeparrot-clean-valid""" , metadata={"""help""": """Name or path of validation dataset."""} )
__lowerCAmelCase : Optional[int] = field(default=2 , metadata={"""help""": """Batch size used for evaluation."""} )
__lowerCAmelCase : Optional[int] = field(
default=-1 , metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
__lowerCAmelCase : Optional[int] = field(default=1_0_2_4 , metadata={"""help""": """Length of sequences to be evaluated."""} )
__lowerCAmelCase : Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
@dataclass
class HumanEvalArguments:
__lowerCAmelCase : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Model name or path of model to be evaluated."""} )
__lowerCAmelCase : Optional[int] = field(default=__lowerCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
__lowerCAmelCase : Optional[int] = field(
default=__lowerCamelCase , metadata={"""help""": """The number of human-eval tasks to run. If not included all tasks are evaluated."""} , )
__lowerCAmelCase : Optional[bool] = field(
default=__lowerCamelCase , metadata={"""help""": """Sample from the language model\'s output distribution."""} )
__lowerCAmelCase : Optional[float] = field(default=0.2 , metadata={"""help""": """Sampling temperature used for generation."""} )
__lowerCAmelCase : Optional[int] = field(default=2_5_6 , metadata={"""help""": """Maximum number of newly generated tokens."""} )
__lowerCAmelCase : Optional[int] = field(default=0 , metadata={"""help""": """Top-k parameter used for generation."""} )
__lowerCAmelCase : Optional[float] = field(default=0.95 , metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
__lowerCAmelCase : Optional[int] = field(default=1_0 , metadata={"""help""": """Number of generations to run in parallel."""} )
__lowerCAmelCase : Optional[int] = field(
default=2_0_0 , metadata={"""help""": """Number of completions to generate for each sample."""} )
__lowerCAmelCase : Optional[int] = field(default=1 , metadata={"""help""": """Random seed used for evaluation."""} )
    __lowerCAmelCase : Optional[str] = field(
        default="""eval_results.json""" , metadata={"""help""": """File in which to save the evaluation results."""} )
__lowerCAmelCase : Optional[str] = field(
default="""0""" , metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
__lowerCAmelCase : Optional[int] = field(
default=-1 , metadata={
"""help""": (
"""Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
""" number corresponds to which GPU device id to run on."""
)
} , )
@dataclass
class PreprocessingArguments:
__lowerCAmelCase : Optional[int] = field(
default=__lowerCamelCase , metadata={
"""help""": """The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."""
} , )
__lowerCAmelCase : Optional[str] = field(
default="""transformersbook/codeparrot""" , metadata={"""help""": """Folder or name of dataset to process."""} )
    __lowerCAmelCase : Optional[str] = field(
        default="""codeparrot-clean""" , metadata={"""help""": """Folder to save the processed dataset."""} )
__lowerCAmelCase : Optional[int] = field(
default=1_0_0_0_0_0 , metadata={"""help""": """Number of files to save per JSON output file."""} )
__lowerCAmelCase : Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__lowerCAmelCase : Optional[float] = field(
default=1_0_0_0 , metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
__lowerCAmelCase : Optional[float] = field(
default=1_0_0 , metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
__lowerCAmelCase : Optional[float] = field(
default=0.25 , metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
__lowerCAmelCase : Optional[float] = field(
default=1.5 , metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
__lowerCAmelCase : Optional[float] = field(
default=0.7 , metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
__lowerCAmelCase : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} , )
__lowerCAmelCase : Optional[bool] = field(
default=__lowerCamelCase , metadata={"""help""": """If True, near-duplicate samples are removed."""} )
__lowerCAmelCase : Optional[float] = field(
default=0.85 , metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )
@dataclass
class TokenizerTrainingArguments:
__lowerCAmelCase : Optional[str] = field(
default="""gpt2""" , metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
__lowerCAmelCase : Optional[str] = field(
default="""transformersbook/codeparrot-train""" , metadata={"""help""": """Dataset to train tokenizer on."""} )
__lowerCAmelCase : Optional[str] = field(default="""content""" , metadata={"""help""": """Column containing text data to process."""} )
__lowerCAmelCase : Optional[int] = field(default=2_0_0_0_0_0 , metadata={"""help""": """Number of examples to train tokenizer on."""} )
    __lowerCAmelCase : Optional[int] = field(
        default=3_2_7_6_8 , metadata={"""help""": """Vocabulary size of the new tokenizer."""} )
__lowerCAmelCase : Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of new tokenizer."""} )
__lowerCAmelCase : Optional[bool] = field(default=__lowerCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
@dataclass
class PretokenizationArguments:
__lowerCAmelCase : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Name or path to the tokenizer."""} )
__lowerCAmelCase : Optional[str] = field(
default="""codeparrot/codeparrot-clean-train""" , metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
__lowerCAmelCase : Optional[str] = field(
default="""tokenized-codeparrot-train""" , metadata={"""help""": """Repo name of the pretokenized data."""} )
__lowerCAmelCase : Optional[int] = field(default=__lowerCamelCase , metadata={"""help""": """Number of workers used for code evaluation."""} )
@dataclass
class InitializationArguments:
__lowerCAmelCase : Optional[str] = field(
default="""gpt2-large""" , metadata={"""help""": """Configuration to use for model initialization."""} )
__lowerCAmelCase : Optional[str] = field(
default="""codeparrot/codeparrot""" , metadata={"""help""": """Tokenizer attached to model."""} )
__lowerCAmelCase : Optional[str] = field(default="""codeparrot""" , metadata={"""help""": """Name of the created model."""} )
__lowerCAmelCase : Optional[bool] = field(default=__lowerCamelCase , metadata={"""help""": """Push saved tokenizer to the hub."""} )
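

# Hedged usage sketch: these dataclasses are designed to be parsed with
# transformers.HfArgumentParser, which turns every `field(...)` above into a CLI flag:
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   (train_args,) = parser.parse_args_into_dataclasses()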
| 309 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
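
# Worked check of the sizing rule above (scale_factor=8): 512 // 8**2 = 8 exactly,
# so downscale_height_and_width(512, 512) returns (64, 64); a non-multiple such as
# 520 rounds up: 520 // 64 = 8 remainder 8, giving (8 + 1) * 8 = 72.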
class lowerCamelCase__( __lowerCamelCase):
def __init__( self: List[Any] , UpperCamelCase_: UNetaDConditionModel , UpperCamelCase_: DDPMScheduler , UpperCamelCase_: VQModel , ):
super().__init__()
self.register_modules(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
__lowerCamelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: int ):
if latents is None:
__lowerCamelCase = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__lowerCamelCase = latents.to(UpperCamelCase_ )
__lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
__lowerCamelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int]=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCamelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowerCamelCase, __lowerCamelCase = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
__lowerCamelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self: int ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(UpperCAmelCase_ )  # the module-level example docstring defined above
def __call__( self: Tuple , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self._execution_device
__lowerCamelCase = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
__lowerCamelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__lowerCamelCase = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = hint.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
__lowerCamelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
__lowerCamelCase = self.scheduler.timesteps
__lowerCamelCase = self.movq.config.latent_channels
__lowerCamelCase, __lowerCamelCase = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
__lowerCamelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase = {"""image_embeds""": image_embeds, """hint""": hint}
__lowerCamelCase = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCamelCase, __lowerCamelCase = noise_pred.chunk(2 )
__lowerCamelCase, __lowerCamelCase = variance_pred.chunk(2 )
__lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
__lowerCamelCase = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__lowerCamelCase = image * 0.5 + 0.5
__lowerCamelCase = image.clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 12 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase_ ( __lowerCamelCase ):
'''simple docstring'''
a__ = 'yolos'
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Any=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=1E-12 , UpperCamelCase__ : Union[str, Any]=[512, 864] , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : int=100 , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : int=5 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=5 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Any=0.1 , **UpperCamelCase__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = initializer_range
__magic_name__ = layer_norm_eps
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = qkv_bias
__magic_name__ = num_detection_tokens
__magic_name__ = use_mid_position_embeddings
__magic_name__ = auxiliary_loss
# Hungarian matcher
__magic_name__ = class_cost
__magic_name__ = bbox_cost
__magic_name__ = giou_cost
# Loss coefficients
__magic_name__ = bbox_loss_coefficient
__magic_name__ = giou_loss_coefficient
__magic_name__ = eos_coefficient
class UpperCAmelCase_ ( __lowerCamelCase ):
'''simple docstring'''
a__ = version.parse("""1.11""" )
@property
def _lowercase ( self : Dict ) -> int:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return 1E-4
@property
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 12
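For reference, a minimal usage sketch; in the upstream transformers API these two classes correspond to YolosConfig and YolosOnnxConfig, and the printed values assume the defaults shown above.
from transformers import YolosConfig

config = YolosConfig()  # defaults should mirror the values above
print(config.image_size, config.num_detection_tokens)  # expected: [512, 864] 100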
| 88 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCamelCase__( unittest.TestCase):
def __init__( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: int=2 , UpperCamelCase_: Optional[Any]=56 , UpperCamelCase_: Tuple=True , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: str=True , UpperCamelCase_: str=99 , UpperCamelCase_: Tuple=32 , UpperCamelCase_: int=2 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Tuple=7 , UpperCamelCase_: Optional[int]="gelu_new" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: List[Any]=5_12 , UpperCamelCase_: Union[str, Any]=16 , UpperCamelCase_: int=2 , UpperCamelCase_: Dict=0.02 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Union[str, Any]="block_sparse" , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Any=2 , UpperCamelCase_: int=3 , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_choices
__lowerCamelCase = rescale_embeddings
__lowerCamelCase = attention_type
__lowerCamelCase = use_bias
__lowerCamelCase = block_size
__lowerCamelCase = num_random_blocks
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs
__lowerCamelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: Optional[Any] ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[str] ):
super().test_hidden_states_output()
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained("""google/bigbird-roberta-base""" )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = model_class(UpperCamelCase_ )
@jax.jit
def model_jitted(UpperCamelCase_: Tuple , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: Union[str, Any] ):
return model(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , **UpperCamelCase_ )
with self.subTest("""JIT Enabled""" ):
__lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__lowerCamelCase = model_jitted(**UpperCamelCase_ ).to_tuple()
self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) )
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[str] , UpperCamelCase_: Any , UpperCamelCase_: Dict=1E-5 , UpperCamelCase_: List[str]="outputs" , UpperCamelCase_: List[str]=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("""outputs.attentions""" ):
return
else:
super().check_pt_flax_outputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
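The jitted-versus-eager comparison in the test above follows a standard JAX testing pattern; here is a self-contained toy version of the same check (the function and shapes are illustrative, not taken from BigBird).
import jax
import jax.numpy as jnp

@jax.jit
def toy_forward(x):
    return jnp.tanh(x) * 2.0

x = jnp.ones((2, 3))
jitted = toy_forward(x)
with jax.disable_jit():
    eager = toy_forward(x)
assert jitted.shape == eager.shape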
| 12 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A ( __lowerCamelCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = ['image_processor', 'tokenizer']
__lowerCamelCase : int = 'BlipImageProcessor'
__lowerCamelCase : Tuple = ('BertTokenizer', 'BertTokenizerFast')
def __init__(self , A , A ) -> int:
"""simple docstring"""
_a = False
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
_a = self.image_processor
def __call__(self , A = None , A = None , A = True , A = False , A = None , A = None , A = 0 , A = None , A = None , A = False , A = False , A = False , A = False , A = False , A = True , A = None , **A , ) -> str:
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
_a = self.tokenizer
_a = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
return text_encoding
# add pixel_values
_a = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ )
if text is not None:
_a = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
else:
_a = None
if text_encoding is not None:
encoding_image_processor.update(UpperCamelCase_ )
return encoding_image_processor
def a__ (self , *A , **A ) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def a__ (self , *A , **A ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
def a__ (self ) -> Any:
"""simple docstring"""
_a = self.tokenizer.model_input_names
_a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
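A usage sketch for this processor; the checkpoint name is an assumption (any BLIP checkpoint with a matching processor config would do), and in the upstream library the class is exposed as BlipProcessor.
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")  # checkpoint assumed
inputs = processor(images=Image.new("RGB", (384, 384)), text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # pixel_values plus the usual text-encoding fields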
| 211 |
def odd_even_transposition( arr : list ):
    '''simple docstring'''
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i + 1], arr[i] = arr[i], arr[i + 1]
    return arr
if __name__ == "__main__":
UpperCAmelCase_ = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
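A quick property check for the sort (not part of the original module): the result should agree with Python's built-in sorted on random input.
import random

sample = random.sample(range(100), 20)
assert odd_even_transposition(list(sample)) == sorted(sample)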
| 12 | 0 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__( self , data):
        self.data = data
        self.left = None
        self.right = None
def build_tree():
    print("""\n********Press N to stop entering at any point of time********\n""" )
    check = input("""Enter the value of the root node: """ ).strip().lower()
    q = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f'''Enter the left node of {node_found.data}: '''
        check = input(msg ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f'''Enter the right node of {node_found.data}: '''
        check = input(msg ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def pre_order( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end=""",""" )
    pre_order(node.left )
    pre_order(node.right )
def in_order( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end=""",""" )
    in_order(node.right )
def post_order( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=""",""" )
def level_order( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    q = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=""",""" )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    q = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=""",""" )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=""",""" )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=""",""" )
        n = n.right
def post_order_iter( node ):
    if not isinstance(node , TreeNode ) or not node:
        return
    stacka, stacka = [], []
    n = node
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stacka.append(n )
    while stacka:  # pop up from stack2 will be the post order
        print(stacka.pop().data , end=""",""" )
def prompt( s = "" , width=50 , char="*" ):
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s ) - 2 , 2 )
    return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
    node = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
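build_tree above is interactive; for quick, non-interactive experiments a small tree can be wired up directly and fed to the same traversal functions:
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left, root.left.right = TreeNode(4), TreeNode(5)
pre_order(root)  # 1,2,4,5,3,
print()
in_order(root)  # 4,2,5,1,3,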
| 163 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__:
def __init__( self: Any , UpperCamelCase_: str , UpperCamelCase_: Dict ):
__lowerCamelCase = question_encoder
__lowerCamelCase = generator
__lowerCamelCase = self.question_encoder
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[Any] ):
if os.path.isfile(UpperCamelCase_ ):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
__lowerCamelCase = os.path.join(UpperCamelCase_ , """question_encoder_tokenizer""" )
__lowerCamelCase = os.path.join(UpperCamelCase_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(UpperCamelCase_ )
self.generator.save_pretrained(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: List[Any] , UpperCamelCase_: Dict , **UpperCamelCase_: Union[str, Any] ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
__lowerCamelCase = kwargs.pop("""config""" , UpperCamelCase_ )
if config is None:
__lowerCamelCase = RagConfig.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoTokenizer.from_pretrained(
UpperCamelCase_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
__lowerCamelCase = AutoTokenizer.from_pretrained(
UpperCamelCase_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=UpperCamelCase_ , generator=UpperCamelCase_ )
def __call__( self: Tuple , *UpperCamelCase_: int , **UpperCamelCase_: int ):
return self.current_tokenizer(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , *UpperCamelCase_: List[Any] , **UpperCamelCase_: List[Any] ):
return self.generator.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , *UpperCamelCase_: str , **UpperCamelCase_: Union[str, Any] ):
return self.generator.decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.question_encoder
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.generator
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: str = "longest" , UpperCamelCase_: str = None , UpperCamelCase_: bool = True , **UpperCamelCase_: int , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , UpperCamelCase_ , )
if max_length is None:
__lowerCamelCase = self.current_tokenizer.model_max_length
__lowerCamelCase = self(
UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__lowerCamelCase = self.current_tokenizer.model_max_length
__lowerCamelCase = self(
text_target=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors=UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_ , )
__lowerCamelCase = labels["""input_ids"""]
return model_inputs
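A usage sketch; upstream this composite tokenizer is exposed as RagTokenizer, and the checkpoint name is an assumption.
from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")  # checkpoint assumed
inputs = tokenizer("who wrote hamlet?", return_tensors="pt")  # routed to the question-encoder tokenizer
print(inputs["input_ids"].shape)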
| 12 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def a_ ( lowerCamelCase ):
if is_torch_version('<' , '2.0.0' ) or not hasattr(A__ , '_dynamo' ):
return False
return isinstance(A__ , torch._dynamo.eval_frame.OptimizedModule )
def a_ ( lowerCamelCase , lowerCamelCase = True ):
UpperCAmelCase__ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
UpperCAmelCase__ = is_compiled_module(A__ )
if is_compiled:
UpperCAmelCase__ = model
UpperCAmelCase__ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(A__ , A__ ):
UpperCAmelCase__ = model.module
if not keep_fpaa_wrapper:
UpperCAmelCase__ = getattr(A__ , 'forward' )
UpperCAmelCase__ = model.__dict__.pop('_original_forward' , A__ )
if original_forward is not None:
while hasattr(A__ , '__wrapped__' ):
UpperCAmelCase__ = forward.__wrapped__
if forward == original_forward:
break
UpperCAmelCase__ = forward
if getattr(A__ , '_converted_to_transformer_engine' , A__ ):
convert_model(A__ , to_transformer_engine=A__ )
if is_compiled:
UpperCAmelCase__ = model
UpperCAmelCase__ = compiled_model
return model
def a_ ( ):
PartialState().wait_for_everyone()
def a_ ( lowerCamelCase , lowerCamelCase ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(A__ , A__ )
elif PartialState().local_process_index == 0:
torch.save(A__ , A__ )
@contextmanager
def a_ ( **lowerCamelCase ):
for key, value in kwargs.items():
UpperCAmelCase__ = str(A__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def a_ ( lowerCamelCase ):
if not hasattr(A__ , '__qualname__' ) and not hasattr(A__ , '__name__' ):
UpperCAmelCase__ = getattr(A__ , '__class__' , A__ )
if hasattr(A__ , '__qualname__' ):
return obj.__qualname__
if hasattr(A__ , '__name__' ):
return obj.__name__
return str(A__ )
def a_ ( lowerCamelCase , lowerCamelCase ):
for key, value in source.items():
if isinstance(A__ , A__ ):
UpperCAmelCase__ = destination.setdefault(A__ , {} )
merge_dicts(A__ , A__ )
else:
UpperCAmelCase__ = value
return destination
def a_ ( lowerCamelCase = None ):
if port is None:
UpperCAmelCase__ = 2_9_5_0_0
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
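The @contextmanager helper above corresponds to accelerate.utils.patch_environment; a sketch of its behaviour, assuming MASTER_ADDR is not already set in the environment (keys are upper-cased on entry and deleted again on exit).
import os
from accelerate.utils import patch_environment

with patch_environment(master_addr="127.0.0.1"):
    assert os.environ["MASTER_ADDR"] == "127.0.0.1"
assert "MASTER_ADDR" not in os.environ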
| 98 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
UpperCAmelCase_ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
UpperCAmelCase_ = 0
for log in Path().glob('*.log'):
UpperCAmelCase_ = 0
with open(log, 'r') as f:
for line in f:
UpperCAmelCase_ = json.loads(line)
if line.get('nodeid', '') != "":
UpperCAmelCase_ = line['nodeid']
if line.get('duration', None) is not None:
UpperCAmelCase_ = f"""{line["duration"]:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCAmelCase_ = []
log.unlink()
UpperCAmelCase_ = ''
UpperCAmelCase_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
for test in failed_tests:
UpperCAmelCase_ = test[0].split('::')
UpperCAmelCase_ = data[0].split('/')[-1]
if data[0] not in filesafailed:
UpperCAmelCase_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCAmelCase_ = [test[0] for test in failed_table]
UpperCAmelCase_ = list(set(files))
# Count number of instances in failed_tests
UpperCAmelCase_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCAmelCase_ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
UpperCAmelCase_ = 'Too many failed tests, please see the full report in the Action results.'
UpperCAmelCase_ = len(err) + 10
UpperCAmelCase_ = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
UpperCAmelCase_ = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
UpperCAmelCase_ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCAmelCase_ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCAmelCase_ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
UpperCAmelCase_ = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase_ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase_ = row[0]
else:
UpperCAmelCase_ = ''
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
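The custom TableFormat at the top of this script renders bare pipe-delimited rows (no rules between lines), which is what the Slack messages above expect; a quick illustration with made-up data:
rows = [["test_modeling_foo.py", 2], ["test_modeling_bar.py", 1]]  # illustrative
print(tabulate(rows, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right"))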
| 12 | 0 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
lowerCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class OnnxRuntimeModel :
def __init__( self , __UpperCamelCase=None , **__UpperCamelCase ) -> Any:
'''simple docstring'''
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
snake_case__ : int = model
snake_case__ : int = kwargs.get('model_save_dir' , UpperCamelCase_ )
snake_case__ : List[str] = kwargs.get('latest_model_name' , UpperCamelCase_ )
def __call__( self , **__UpperCamelCase ) -> Dict:
'''simple docstring'''
snake_case__ : Tuple = {k: np.array(UpperCamelCase_ ) for k, v in kwargs.items()}
return self.model.run(UpperCamelCase_ , UpperCamelCase_ )
@staticmethod
def __a ( __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None ) -> Any:
'''simple docstring'''
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
snake_case__ : Tuple = 'CPUExecutionProvider'
return ort.InferenceSession(UpperCamelCase_ , providers=[provider] , sess_options=UpperCamelCase_ )
def __a ( self , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase ) -> Any:
'''simple docstring'''
snake_case__ : int = file_name if file_name is not None else ONNX_WEIGHTS_NAME
snake_case__ : str = self.model_save_dir.joinpath(self.latest_model_name )
snake_case__ : Dict = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
snake_case__ : int = self.model_save_dir.joinpath(UpperCamelCase_ )
if src_path.exists():
snake_case__ : str = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ )
try:
shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ )
except shutil.SameFileError:
pass
def __a ( self , __UpperCamelCase , **__UpperCamelCase , ) -> Dict:
'''simple docstring'''
if os.path.isfile(UpperCamelCase_ ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
# saving model weights/files
self._save_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def __a ( cls , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(UpperCamelCase_ ):
snake_case__ : Dict = OnnxRuntimeModel.load_model(
os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
snake_case__ : List[Any] = Path(UpperCamelCase_ )
# load model from hub
else:
# download model
snake_case__ : List[Any] = hf_hub_download(
repo_id=UpperCamelCase_ , filename=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , )
snake_case__ : Optional[int] = Path(UpperCamelCase_ ).parent
snake_case__ : int = Path(UpperCamelCase_ ).name
snake_case__ : Dict = OnnxRuntimeModel.load_model(UpperCamelCase_ , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ )
return cls(model=UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def __a ( cls , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ) -> List[Any]:
'''simple docstring'''
snake_case__ : Tuple = None
if len(str(UpperCamelCase_ ).split('@' ) ) == 2:
snake_case__ , snake_case__ : str = model_id.split('@' )
return cls._from_pretrained(
model_id=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , **UpperCamelCase_ , )
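A loading sketch (the local path is hypothetical): the wrapper exposes an exported ONNX graph behind a from_pretrained/__call__ interface that accepts numpy-convertible keyword inputs.
model = OnnxRuntimeModel.from_pretrained("./onnx_text_encoder", provider="CPUExecutionProvider")  # path assumed
outputs = model(input_ids=np.ones((1, 77), dtype=np.int64))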
| 143 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings( ModelMixin , ConfigMixin):
    @register_to_config
    def __init__( self: Optional[Any] , learnable: bool , hidden_size: Optional[int] = None , length: Optional[int] = None ):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class VQDiffusionPipeline( DiffusionPipeline):
UpperCAmelCase__ : VQModel
UpperCAmelCase__ : CLIPTextModel
UpperCAmelCase__ : CLIPTokenizer
UpperCAmelCase__ : TransformeraDModel
UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__ : VQDiffusionScheduler
def __init__( self: str , UpperCamelCase_: VQModel , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: TransformeraDModel , UpperCamelCase_: VQDiffusionScheduler , UpperCamelCase_: LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=UpperCamelCase_ , transformer=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Any ):
__lowerCamelCase = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1
# get prompt text embeddings
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 )
else:
__lowerCamelCase = [""""""] * batch_size
__lowerCamelCase = text_input_ids.shape[-1]
__lowerCamelCase = self.tokenizer(
UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , )
__lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase = negative_prompt_embeds.shape[1]
__lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 )
__lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: Tuple , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 5.0 , UpperCamelCase_: float = 1.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = len(UpperCamelCase_ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase_ )}.' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ )
__lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ )
__lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ):
__lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ )
__lowerCamelCase = torch.exp(UpperCamelCase_ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
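The truncate step above keeps only the most probable classes whose cumulative probability stays below truncation_rate (plus the first class that crosses the threshold) and sets the rest to log(0); a standalone re-statement with a worked example (tensor layout is (batch, vocab, pixels)):
import torch

def truncate_log_probs(log_p, truncation_rate):
    sorted_log_p, indices = torch.sort(log_p, 1, descending=True)
    sorted_p = torch.exp(sorted_log_p)
    keep_mask = sorted_p.cumsum(dim=1) < truncation_rate
    all_true = torch.full_like(keep_mask[:, 0:1, :], True)  # always keep the largest probability
    keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]
    keep_mask = keep_mask.gather(1, indices.argsort(1))  # undo the sort
    out = log_p.clone()
    out[~keep_mask] = -float("inf")  # -inf = log(0)
    return out

# probs 0.5 / 0.3 / 0.15 / 0.05 with rate 0.8 keep only the top two classes
log_p = torch.log(torch.tensor([0.5, 0.3, 0.15, 0.05])).view(1, 4, 1)
print(truncate_log_probs(log_p, 0.8).squeeze())  # approx. [-0.69, -1.20, -inf, -inf]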
| 12 | 0 |
'''simple docstring'''
def price_plus_tax( price: float , tax_rate: float ) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(1_0_0, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 161 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Dict = DistilBertTokenizer
UpperCAmelCase__ : Dict = DistilBertTokenizerFast
UpperCAmelCase__ : Tuple = True
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
__lowerCamelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
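Concretely, the token layout asserted above decodes to the familiar BERT-style pattern (output shown is approximate):
from transformers import DistilBertTokenizer

tok = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
ids = tok.build_inputs_with_special_tokens(
    tok.encode("sequence builders", add_special_tokens=False),
    tok.encode("multi-sequence build", add_special_tokens=False),
)
print(tok.decode(ids))  # [CLS] sequence builders [SEP] multi - sequence build [SEP]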
| 12 | 0 |
from PIL import Image
def change_brightness( img : Image, level : float ):
    def brightness( c : int ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
_snake_case : Dict = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 284 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ = 16
UpperCAmelCase_ = 32
def lowerCamelCase__ ( A__ : Accelerator , A__ : int = 16 , A__ : str = "bert-base-cased" ):
'''simple docstring'''
__lowerCamelCase = AutoTokenizer.from_pretrained(A__ )
__lowerCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(A__ : int ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCamelCase = datasets.map(
A__ , batched=A__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(A__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(A__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
__lowerCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
__lowerCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def lowerCamelCase__ ( A__ : Tuple , A__ : Union[str, Any] , A__ : Tuple , A__ : Optional[Any] ):
'''simple docstring'''
model.eval()
__lowerCamelCase = 0
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase = model(**A__ )
__lowerCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__lowerCamelCase, __lowerCamelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A__ ) - 1:
__lowerCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__lowerCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A__ , references=A__ , )
__lowerCamelCase = metric.compute()
return eval_metric["accuracy"]
def training_function(config, args):
    """Train on GLUE MRPC, saving a checkpoint and a state file after every epoch,
    or validate a resumed checkpoint against its saved state and exit."""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizer's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f'state_{starting_epoch-1}.json'), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1
        output_dir = f'epoch_{epoch}'
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f'epoch {epoch}:', state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f'state_{epoch}.json'), "w") as f:
                json.dump(state, f)
def main():
    """Parse CLI arguments and run the training/validation entry point."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.", )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
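# Launch sketch for the script above (paths and process counts are illustrative,
# not taken from this repository):
#
#   accelerate launch this_script.py --output_dir ./ckpts --num_epochs 2
#
# followed by a second run that loads and validates the saved state:
#
#   accelerate launch this_script.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_1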
| 12 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
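# Lazy-import sketch (assuming this file is the package's __init__): a plain
# `from ... import SwiftFormerConfig` resolves through the _LazyModule above,
# so the torch-backed modeling module is only imported on first attribute access.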
| 13 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase_ = get_tests_dir('fixtures')
UpperCAmelCase_ = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
UpperCAmelCase_ = get_tests_dir('fixtures/dummy-config.json')
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = 0
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
__lowerCamelCase = WavaVecaFeatureExtractor(**UpperCamelCase_ )
# save in new folder
model_config.save_pretrained(UpperCamelCase_ )
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
# make sure private variable is not incorrectly saved
__lowerCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
with self.assertRaisesRegex(
UpperCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCAmelCase__ ( self: Tuple ):
with self.assertRaisesRegex(
UpperCamelCase_ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , revision="""aaaaaa""" )
def lowerCAmelCase__ ( self: Optional[Any] ):
with self.assertRaisesRegex(
UpperCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase__ ( self: Tuple ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def lowerCAmelCase__ ( self: Any ):
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCamelCase_ ):
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCamelCase = CustomFeatureExtractor.from_pretrained(UpperCamelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase__ ( self: Dict ):
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            is_local = True
try:
AutoConfig.register("""custom""" , UpperCamelCase_ )
AutoFeatureExtractor.register(UpperCamelCase_ , UpperCamelCase_ )
# If remote code is not set, the default is to use local
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__lowerCamelCase = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=UpperCamelCase_ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(UpperCamelCase_ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
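# Registration sketch distilled from the tests above (CustomConfig and
# CustomFeatureExtractor are local test fixtures, not library classes):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   feature_extractor = AutoFeatureExtractor.from_pretrained(some_saved_dir)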
| 12 | 0 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__A = get_logger(__name__)
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :List[Any] = 'dummy_data'
_UpperCAmelCase :str = 'datasets'
_UpperCAmelCase :Tuple = False
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , ):
lowercase__: Tuple = 0
lowercase__: str = dataset_name
lowercase__: List[str] = cache_dir
lowercase__: Optional[Any] = use_local_dummy_data
lowercase__: Optional[int] = config
# download_callbacks take a single url as input
lowercase__: str = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase__: int = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase__: List[str] = str(UpperCamelCase_ )
# to be downloaded
lowercase__: List[Any] = None
lowercase__: str = None
@property
def _snake_case ( self ):
if self._dummy_file is None:
lowercase__: str = self.download_dummy_data()
return self._dummy_file
@property
def _snake_case ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def _snake_case ( self ):
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def _snake_case ( self ):
lowercase__: int = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase__: Any = cached_path(
UpperCamelCase_ , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase_ , force_extract=UpperCamelCase_ )
return os.path.join(UpperCamelCase_ , self.dummy_file_name )
@property
def _snake_case ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def _snake_case ( self ):
if self._bucket_url is None:
lowercase__: List[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def _snake_case ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def _snake_case ( self , _UpperCAmelCase , *_UpperCAmelCase ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase__: str = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase__: List[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return self.create_dummy_data_dict(UpperCamelCase_ , UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self.create_dummy_data_list(UpperCamelCase_ , UpperCamelCase_ )
else:
return self.create_dummy_data_single(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self , _UpperCAmelCase , *_UpperCAmelCase ):
return self.download_and_extract(UpperCamelCase_ )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
return self.download_and_extract(UpperCamelCase_ )
def _snake_case ( self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ):
return path
def _snake_case ( self ):
return {}
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
for single_url in single_urls:
download_callback(UpperCamelCase_ )
else:
lowercase__: Optional[Any] = single_urls
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__: str = [os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) ) for x in single_urls]
else:
lowercase__: Union[str, Any] = single_urls
lowercase__: int = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) )
lowercase__: Any = value
# make sure that values are unique
if all(isinstance(UpperCamelCase_ , UpperCamelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase__: str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Union[str, Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase__: Dict = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , UpperCamelCase_ ) ) for url in data_url )
lowercase__: List[Any] = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase__: Union[str, Any] = [data_url[0]] * len(UpperCamelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase__: List[Any] = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(UpperCamelCase_ )
return dummy_data_list
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase__: str = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(UpperCamelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _snake_case ( self ):
pass
def _snake_case ( self ):
pass
def _snake_case ( self , _UpperCAmelCase ):
def _iter_archive_members(_UpperCAmelCase ):
# this preserves the order of the members inside the ZIP archive
lowercase__: str = Path(self.dummy_file ).parent
lowercase__: Tuple = path.relative_to(UpperCamelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase__: str = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(UpperCamelCase_ )
lowercase__: Tuple = Path(UpperCamelCase_ )
lowercase__: List[Any] = _iter_archive_members(UpperCamelCase_ ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(UpperCamelCase_ ).as_posix(), file_path.open('''rb''' )
def _snake_case ( self , _UpperCAmelCase ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase__: int = [paths]
for path in paths:
if os.path.isfile(UpperCamelCase_ ):
if os.path.basename(UpperCamelCase_ ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCamelCase_ ):
if os.path.basename(UpperCamelCase_ ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(UpperCamelCase_ ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(UpperCamelCase_ , UpperCamelCase_ )
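# Filename-mapping sketch for the manager above: a remote URL is reduced to its
# URL-quoted final path component inside the dummy_data folder (URL is made up):
#
#   from pathlib import Path
#   import urllib.parse
#   url = "https://example.com/data/train.json?rev=2"
#   urllib.parse.quote_plus(Path(url).name)  # -> 'train.json%3Frev%3D2'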
| 177 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
UpperCAmelCase_ = get_logger(__name__)
class lowerCamelCase__:
UpperCAmelCase__ : List[Any] = 'dummy_data'
UpperCAmelCase__ : str = 'datasets'
UpperCAmelCase__ : Tuple = False
def __init__( self: Optional[Any] , UpperCamelCase_: str , UpperCamelCase_: str , UpperCamelCase_: Union[Version, str] , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[List[Callable]] = None , ):
__lowerCamelCase = 0
__lowerCamelCase = dataset_name
__lowerCamelCase = cache_dir
__lowerCamelCase = use_local_dummy_data
__lowerCamelCase = config
# download_callbacks take a single url as input
__lowerCamelCase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowerCamelCase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowerCamelCase = str(UpperCamelCase_ )
# to be downloaded
__lowerCamelCase = None
__lowerCamelCase = None
@property
def lowerCAmelCase__ ( self: List[Any] ):
if self._dummy_file is None:
__lowerCamelCase = self.download_dummy_data()
return self._dummy_file
@property
def lowerCAmelCase__ ( self: str ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowerCamelCase = cached_path(
UpperCamelCase_ , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase_ , force_extract=UpperCamelCase_ )
return os.path.join(UpperCamelCase_ , self.dummy_file_name )
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowerCAmelCase__ ( self: Tuple ):
if self._bucket_url is None:
__lowerCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def lowerCAmelCase__ ( self: str ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict , *UpperCamelCase_: str ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowerCamelCase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowerCamelCase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return self.create_dummy_data_dict(UpperCamelCase_ , UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self.create_dummy_data_list(UpperCamelCase_ , UpperCamelCase_ )
else:
return self.create_dummy_data_single(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Optional[Any] , *UpperCamelCase_: str ):
return self.download_and_extract(UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: str ):
return self.download_and_extract(UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int , *UpperCamelCase_: List[str] , **UpperCamelCase_: str ):
return path
def lowerCAmelCase__ ( self: Dict ):
return {}
def lowerCAmelCase__ ( self: str , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
for single_url in single_urls:
download_callback(UpperCamelCase_ )
else:
__lowerCamelCase = single_urls
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = [os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) ) for x in single_urls]
else:
__lowerCamelCase = single_urls
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(Path(UpperCamelCase_ ).name ) )
__lowerCamelCase = value
# make sure that values are unique
if all(isinstance(UpperCamelCase_ , UpperCamelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowerCamelCase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] ):
__lowerCamelCase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowerCamelCase = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , UpperCamelCase_ ) ) for url in data_url )
__lowerCamelCase = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowerCamelCase = [data_url[0]] * len(UpperCamelCase_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(UpperCamelCase_ )
return dummy_data_list
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any] ):
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowerCamelCase = os.path.join(UpperCamelCase_ , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(UpperCamelCase_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
def lowerCAmelCase__ ( self: List[Any] ):
pass
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Dict ):
def _iter_archive_members(UpperCamelCase_: Any ):
# this preserves the order of the members inside the ZIP archive
__lowerCamelCase = Path(self.dummy_file ).parent
__lowerCamelCase = path.relative_to(UpperCamelCase_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowerCamelCase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(UpperCamelCase_ )
__lowerCamelCase = Path(UpperCamelCase_ )
__lowerCamelCase = _iter_archive_members(UpperCamelCase_ ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(UpperCamelCase_ ).as_posix(), file_path.open("""rb""" )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Dict ):
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = [paths]
for path in paths:
if os.path.isfile(UpperCamelCase_ ):
if os.path.basename(UpperCamelCase_ ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCamelCase_ ):
if os.path.basename(UpperCamelCase_ ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(UpperCamelCase_ ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(UpperCamelCase_ , UpperCamelCase_ )
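# Shard-detection sketch for the list-creation method above: URLs that look like
# numbered shards all collapse onto the first shard's dummy file, e.g.:
#
#   import re
#   bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", "data.txt-00001-of-00300"))  # True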
| 12 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument." )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Returns the largest divisor of seq_length below window_size, and the resulting number of blocks."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
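# Quick check of the attention-pattern expansion above: the default
# attention_types spec expands into 24 alternating layer types, matching
# the default num_layers.
if __name__ == "__main__":
    layers = GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
    assert layers == ["global", "local"] * 12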
| 298 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, pruning column and diagonal collisions."""
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return
    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because a
        # repeated value means a vertical collision. Then we apply the two formulas
        # we learned before:
        #
        # 45º: y - x = b   or   45º: row - col = b
        # 135º: y + x = b  or  135º: row + col = b
        #
        # And we verify that the results of these two formulas do not exist in their
        # respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
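# Worked check of the diagonal formulas above: queens at (row=0, col=1) and
# (row=1, col=2) collide on a 45º diagonal since 0 - 1 == 1 - 2 == -1, while
# (0, 1) and (1, 0) collide on a 135º diagonal since 0 + 1 == 1 + 0 == 1.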
| 12 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
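# Export-config sketch (a minimal check, not a full ONNX export): the dynamic
# axes above tell the exporter which input dimensions may vary at runtime.
if __name__ == "__main__":
    onnx_config = DistilBertOnnxConfig(DistilBertConfig())
    assert list(onnx_config.inputs) == ["input_ids", "attention_mask"]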
| 309 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed to leave exactly one coin on every node."""
    if root is None:
        return 0
    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")
    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        abs_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(abs_moves, excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
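# Worked example: the root holds all 3 coins, so one coin travels down each
# edge to the two empty leaves -> 2 moves in total.
if __name__ == "__main__":
    assert distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2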
| 12 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
__lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration extended with masking/pruning hyper-parameters."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
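# Usage sketch (values are illustrative; "topK" is the default in this file):
# the pruning hyper-parameters are stored as plain config attributes.
if __name__ == "__main__":
    config = MaskedBertConfig(pruning_method="topK", mask_scale=0.0)
    assert config.pruning_method == "topK"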
| 88 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    """Rescales pixel values and symmetrically pads images to a multiple of `pad_size`."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
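# Padding math sketch for `pad` above: with pad_size=8, a 13-pixel side grows by
# (13 // 8 + 1) * 8 - 13 = 3 to reach 16. Note that, as written, a side already
# divisible by 8 still gains a full extra block ((16 // 8 + 1) * 8 - 16 = 8).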
| 12 | 0 |
'''simple docstring'''
import os
import sys
import unittest
lowercase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowercase_ = os.path.join(git_repo_path, "src", "transformers")
lowercase_ = "\n{0} = None\n"
lowercase_ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
lowercase_ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Dict:
"""simple docstring"""
_a = find_backend(''' _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")''' )
self.assertIsNone(UpperCamelCase_ )
_a = find_backend(''' if not is_tokenizers_available():''' )
self.assertEqual(UpperCamelCase_ , '''tokenizers''' )
_a = find_backend(''' if not is_tensorflow_text_available():''' )
self.assertEqual(UpperCamelCase_ , '''tensorflow_text''' )
_a = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' )
self.assertEqual(UpperCamelCase_ , '''sentencepiece_and_tokenizers''' )
_a = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' )
self.assertEqual(UpperCamelCase_ , '''sentencepiece_and_tensorflow_text''' )
_a = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' )
self.assertEqual(UpperCamelCase_ , '''sentencepiece_and_tokenizers_and_vision''' )
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , UpperCamelCase_ )
self.assertIn('''tensorflow_text''' , UpperCamelCase_ )
self.assertIn('''sentencepiece_and_tokenizers''' , UpperCamelCase_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertModel''' , objects['''tf'''] )
self.assertIn('''FlaxBertModel''' , objects['''flax'''] )
self.assertIn('''BertModel''' , objects['''torch'''] )
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] )
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase_ , '''\nCONSTANT = None\n''' )
_a = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
UpperCamelCase_ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
_a = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
_a = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
'''
_a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , UpperCamelCase_ )
| 211 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide and conquer: return the maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
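# The recurrence is T(n) = 2·T(n/2) + O(1), i.e. linear time with O(log n)
# recursion depth. A quick check:
if __name__ == "__main__":
    assert find_max([1, 9, 4, 7], 0, 3) == 9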
| 12 | 0 |
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    """Matching-based 2-approximation for minimum vertex cover."""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 163 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCamelCase__( TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = SMALL_MODEL_IDENTIFIER
__lowerCamelCase = """pt"""
__lowerCamelCase = """tf"""
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase_ )
model_tf.save_pretrained(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = """mock_framework"""
# Framework provided - return whatever the user provides
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase_ )
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_tf )
# Both in environment -> use PyTorch
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase_ , self.framework_pt )
# Both not in environment -> raise error
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
__lowerCamelCase = MagicMock(return_value=UpperCamelCase_ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase_ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCamelCase_ ):
with self.assertRaises(UpperCamelCase_ ):
__lowerCamelCase = FeaturesManager.determine_framework(self.test_model )
| 12 | 0 |
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
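# Every value tied for the highest count is returned, in sorted order:
if __name__ == "__main__":
    assert mode([2, 1, 2, 3]) == [2]
    assert mode([1, 2, 2, 1]) == [1, 2]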
| 98 |
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Apply one step of Conway's Game of Life rules to the whole grid."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render `frames` successive generations as grayscale PIL images."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
UpperCAmelCase_ = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
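# Sanity check of the rules above: a vertical blinker flips to horizontal.
if __name__ == "__main__":
    assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]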
| 12 | 0 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[int] = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True) -> None:
    print(F"""Converting {name}...""")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # The two state dicts line up one-to-one, so weights are copied by position.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(F"""Pushed {checkpoint_name}""")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True) -> str:
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }
    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
lowerCAmelCase__ : int = parser.parse_args()
lowerCAmelCase__ : str = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
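    # Hedged follow-up check, assuming the run above was invoked with
    # --model_name levit-128S and --push_to_hub, so the converted checkpoint
    # was written under the dump folder locally:
    from transformers import LevitForImageClassificationWithTeacher
    reloaded = LevitForImageClassificationWithTeacher.from_pretrained(
        pytorch_dump_folder_path / 'levit-128S'
    )
    print(reloaded.config.hidden_sizes)  # [128, 256, 384] for the levit-128S config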
| 143 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Union[str, Any] = StableDiffusionInpaintPipeline
UpperCAmelCase__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase__ : int = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase__ : Union[str, Any] = frozenset([])
def lowerCAmelCase__ ( self: str ):
torch.manual_seed(0 )
__lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase_ , )
__lowerCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
torch.manual_seed(0 )
__lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCamelCase = CLIPTextModel(UpperCamelCase_ )
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCamelCase = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((64, 64) )
__lowerCamelCase = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionInpaintPipeline(**UpperCamelCase_ )
__lowerCamelCase = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
__lowerCamelCase = sd_pipe(**UpperCamelCase_ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: int ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: List[Any] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type="""np""" , )
__lowerCamelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCAmelCase__ ( self: int ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
__lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
__lowerCamelCase = """stabilityai/stable-diffusion-2-inpainting"""
__lowerCamelCase = PNDMScheduler.from_pretrained(UpperCamelCase_ , subfolder="""scheduler""" )
__lowerCamelCase = StableDiffusionInpaintPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , scheduler=UpperCamelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCamelCase = """Face of a yellow cat, high resolution, sitting on a park bench"""
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , mask_image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type="""np""" , )
__lowerCamelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
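# A hedged, minimal non-test sketch of the same inpainting flow the slow tests
# above exercise; the model id and image URLs are taken from those tests:
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
image.save("inpainted_cat.png")  # hypothetical output file name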
| 12 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCamelCase__ ( unittest.TestCase):
def lowercase_ ( self :Tuple ) -> Dict:
'''simple docstring'''
__A = tempfile.mkdtemp()
# fmt: off
__A = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__A = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__A = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__A = {'unk_token': '<unk>'}
__A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase_ ) )
__A = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
__A = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def lowercase_ ( self :List[str] , **_A :str ) -> Tuple:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **UpperCamelCase_ )
def lowercase_ ( self :Optional[Any] , **_A :str ) -> Dict:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **UpperCamelCase_ )
def lowercase_ ( self :Union[str, Any] , **_A :List[str] ) -> List[Any]:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def lowercase_ ( self :int ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self :int ) -> List[Any]:
'''simple docstring'''
__A = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__A = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self :Dict ) -> Optional[int]:
'''simple docstring'''
__A = self.get_tokenizer()
__A = self.get_rust_tokenizer()
__A = self.get_image_processor()
__A = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
__A = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
__A = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
__A = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ )
def lowercase_ ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__A = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__A = self.get_image_processor(do_normalize=UpperCamelCase_ )
__A = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowercase_ ( self :Optional[int] ) -> Any:
'''simple docstring'''
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__A = self.prepare_image_inputs()
__A = image_processor(UpperCamelCase_ , return_tensors='np' )
__A = processor(images=UpperCamelCase_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase_ ( self :Dict ) -> List[str]:
'''simple docstring'''
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__A = 'lower newer'
__A = processor(text=UpperCamelCase_ , return_tensors='np' )
__A = tokenizer(UpperCamelCase_ , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowercase_ ( self :Tuple ) -> List[Any]:
'''simple docstring'''
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__A = 'lower newer'
__A = self.prepare_image_inputs()
__A = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowercase_ ( self :Optional[Any] ) -> Any:
'''simple docstring'''
__A = 'google/owlvit-base-patch32'
__A = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__A = ['cat', 'nasa badge']
__A = processor(text=UpperCamelCase_ )
__A = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowercase_ ( self :Any ) -> str:
'''simple docstring'''
__A = 'google/owlvit-base-patch32'
__A = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__A = [['cat', 'nasa badge'], ['person']]
__A = processor(text=UpperCamelCase_ )
__A = 16
__A = len(UpperCamelCase_ )
__A = max([len(UpperCamelCase_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowercase_ ( self :Optional[int] ) -> int:
'''simple docstring'''
__A = 'google/owlvit-base-patch32'
__A = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__A = ['cat', 'nasa badge']
__A = processor(text=UpperCamelCase_ )
__A = 16
__A = inputs['input_ids']
__A = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowercase_ ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__A = self.prepare_image_inputs()
__A = self.prepare_image_inputs()
__A = processor(images=UpperCamelCase_ , query_images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowercase_ ( self :str ) -> Tuple:
'''simple docstring'''
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A = processor.batch_decode(UpperCamelCase_ )
__A = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
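# Hedged end-to-end sketch of the processor exercised above, using the public
# checkpoint the tests reference; the blank PIL image is a stand-in input:
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained('google/owlvit-base-patch32')
image = Image.new('RGB', (224, 224))
inputs = processor(text=['cat', 'nasa badge'], images=image, return_tensors='pt')
print(inputs['input_ids'].shape)  # torch.Size([2, 16]): one padded row per text query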
| 161 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
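# Illustrative only: with the _LazyModule wiring above, importing the package is
# cheap; tokenization_mluke is loaded on first attribute access (this assumes
# sentencepiece is installed):
import transformers.models.mluke as mluke

tokenizer_cls = mluke.MLukeTokenizer  # this access triggers the real import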
| 12 | 0 |