"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    # pop the old key and re-insert its value under the new key
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        # (the hidden size is 256, so the fused projection has shape (3 * 256, 256))
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    # standard ImageNet mean/std normalization
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
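# Usage sketch (the script filename below is an assumption, not part of the source):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection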
# ---------------------------------------------------------------------------
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an Ernie-M model."""

    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        initializer_range=0.02,
        pad_token_id=1,
        layer_norm_eps=1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
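# Usage sketch: the defaults above reproduce the base architecture, e.g.
#   config = ErnieMConfig()   # vocab_size=250002, hidden_size=768, 12 layers/heads
# ---------------------------------------------------------------------------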
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
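# Usage sketch (the script filename below is an assumption):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2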
# ---------------------------------------------------------------------------
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple, value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another: Any):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """
        Apply the Sherman-Morrison formula: treating this matrix as A^(-1), return
        (A + u * v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
        or None when the denominator 1 + v^T A^(-1) u is zero.
        """
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
# ---------------------------------------------------------------------------
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
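# Illustrative inputs (made-up numbers, checked against the regex above):
#   is_sri_lankan_phone_number("+94773283048")  # True: "+94", then "7", a valid
#                                               # second digit, and 7 more digits
#   is_sri_lankan_phone_number("0142786300")    # False: the block after the
#                                               # prefix must start with 7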
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
# ---------------------------------------------------------------------------
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a GPTSAN-japanese model."""

    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
# ---------------------------------------------------------------------------
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target; "data" holds the features.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
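# Usage sketch (the script filename below is an assumption):
#   `python xgboost_regressor.py` trains the regressor on the California housing
#   data and prints the mean absolute and mean squared errors.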
# ---------------------------------------------------------------------------
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]

        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
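# Token-type-ids sketch: for a pair (A, B) with len(A) == 2 and len(B) == 3,
# create_token_type_ids_from_sequences(A, B) returns [0, 0, 0, 0, 1, 1, 1, 1]
# ([CLS] + A + [SEP] are mapped to 0, B + [SEP] to 1).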
# ---------------------------------------------------------------------------
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F'''https://google.com{link.get("href")}''')
# ---------------------------------------------------------------------------
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    pass
def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
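# Usage sketch (the script path below is an assumption):
#   torchrun --nproc_per_node=2 run_torch_distributed.py --streaming True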
# ---------------------------------------------------------------------------
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA1 hashing algorithm."""

    def __init__(self, data: bytes):
        self.data = data
        # SHA-1 initialization constants (h0..h4)
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        # circular left rotation of a 32-bit integer
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        # pad the message to a multiple of 64 bytes, appending the original
        # bit-length as a big-endian 64-bit integer
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        # split the padded data into 64-byte blocks
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block: bytes):
        # message schedule: expand 16 32-bit words into 80 words
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
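# Usage sketch: the pure-Python digest matches hashlib's, e.g.
#   SHA1Hash(b"abc").final_hash() == hashlib.sha1(b"abc").hexdigest()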
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ---------------------------------------------------------------------------
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
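# Usage sketch: arrow keys come back as a character whose code has ARROW_KEY_FLAG
# (the 9th bit) set, so callers can compare key codes against KEYMAP, e.g.
#   char = get_character()
#   if char != KEYMAP["undefined"] and ord(char) == KEYMAP["up"]:
#       ...  # handle the up arrow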
# ---------------------------------------------------------------------------
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
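# Usage sketch (the test path below is an assumption):
#   pytest tests/models/vit/test_modeling_flax_vit.py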
# ---------------------------------------------------------------------------
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    # prints the tree level by level, one line per level
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
    node: TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
# ---------------------------------------------------------------------------
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            The hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
    r"""A 1D UNet model that takes a noisy sample and a timestep and returns a sample-shaped output."""

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], return_dict: bool = True) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
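# Hedged usage sketch (not part of the original module; assumes the public
# diffusers API, where the class above corresponds to UNet1DModel): a dummy
# denoising step preserves the (batch, channels, length) sample shape.
#
#     import torch
#     from diffusers import UNet1DModel
#
#     model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
#     sample = torch.randn(1, 2, 256)
#     out = model(sample, timestep=10).sample
#     assert out.shape == sample.shape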
| 207 | 1 |
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
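# Quick sanity check (illustrative values, not from the original file): the
# cheapest monotone path through the classic 3x3 grid is 1 -> 3 -> 1 -> 1 -> 1,
# which costs 7.
if __name__ == "__main__":
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7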
| 300 |
def heaps(arr: list) -> list:
    """Iterative Heap's algorithm: return all permutations of ``arr`` as tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
_lowerCAmelCase : str = input('''Enter numbers separated by a comma:\n''').strip()
_lowerCAmelCase : str = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
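# Quick sanity check (illustrative, not from the original file): Heap's
# algorithm emits all n! orderings exactly once, each a single swap away
# from its predecessor.
if __name__ == "__main__":
    perms = heaps([1, 2, 3])
    assert len(perms) == 6 == len(set(perms))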
| 300 | 1 |
def merge_sort(collection: list) -> list:
    """Sort a list in ascending order with a generator-based merge sort."""

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
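# Quick sanity check (illustrative, not from the original file): merge sort
# handles duplicates and the empty list.
if __name__ == "__main__":
    assert merge_sort([5, 3, 5, 1]) == [1, 3, 5, 5]
    assert merge_sort([]) == []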
| 361 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
__lowercase= VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= generator.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt='first prompt' , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image_then_image_variation(self):
__lowercase= VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
__lowercase= 'cyberpunk 2077'
__lowercase= load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.dual_guided(
prompt=lowerCAmelCase , image=lowerCAmelCase , text_to_image_strength=0.75 , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' , ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= 'A painting of a squirrel eating a burger '
__lowercase= torch.manual_seed(0 )
__lowercase= pipe.text_to_image(
prompt=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowercase= pipe.image_variation(lowerCAmelCase , generator=lowerCAmelCase , output_type='numpy' ).images
__lowercase= image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase= np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 304 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
    def get_config(self):
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
'''simple docstring'''
__lowerCamelCase = FocalNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone(self, config, pixel_values, labels):
'''simple docstring'''
__lowerCamelCase = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__lowerCamelCase = None
__lowerCamelCase = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
'''simple docstring'''
__lowerCamelCase = FocalNetForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = FocalNetForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
'''simple docstring'''
__lowerCamelCase = self.type_sequence_label_size
__lowerCamelCase = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
'''simple docstring'''
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
'''simple docstring'''
return
    def test_model(self):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
    def test_backbone(self):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
    def test_for_masked_image_modeling(self):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
    def test_for_image_classification(self):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
    def test_inputs_embeds(self):
'''simple docstring'''
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
    def test_feed_forward_chunking(self):
'''simple docstring'''
pass
    def test_model_common_attributes(self):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
    def test_forward_signature(self):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
'''simple docstring'''
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase = outputs.hidden_states
__lowerCamelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
# FocalNet has a different seq_length
__lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__lowerCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = reshaped_hidden_states[0].shape
__lowerCamelCase = (
reshaped_hidden_states[0].view(lowerCamelCase__ , lowerCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output(self):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    def test_hidden_states_output_with_padding(self):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
@slow
    def test_model_from_pretrained(self):
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = FocalNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
    def test_initialization(self):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
'''simple docstring'''
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
'''simple docstring'''
__lowerCamelCase = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
__lowerCamelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
__lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp(self):
'''simple docstring'''
        self.model_tester = FocalNetModelTester(self)
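# Hedged usage sketch (assumed helper, not part of the original test suite;
# requires access to the Hugging Face Hub): classify a single image with the
# public microsoft/focalnet-tiny checkpoint, mirroring the integration test
# above rather than adding a new test case.
def _demo_focalnet_inference(image_path):
    from PIL import Image
    from transformers import AutoImageProcessor, FocalNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
    model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return logits.argmax(dim=-1).item()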
| 90 |
def is_even(number: int) -> bool:
    """Return True if the least significant bit of ``number`` is 0."""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
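# The check relies on binary parity: the least significant bit is 0 for every
# even number and 1 for every odd one (illustrative check, not part of the
# original file).
if __name__ == "__main__":
    assert is_even(0) and is_even(10) and not is_even(7)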
| 90 | 1 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
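# Worked example (illustrative values, not from the original file): with
# capacity 50 and value/weight pairs 60/10, 100/20 and 120/30, the greedy
# ratio order takes the first two items whole plus two thirds of the third,
# for a total value of 60 + 100 + 80 = 240.
if __name__ == "__main__":
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240 and fractions == [1, 1, 2 / 3]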
| 29 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = 'yolos'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation(self) -> float:
return 1E-4
@property
    def default_onnx_opset(self) -> int:
return 12
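# Hedged usage sketch (not part of the original module; assumes the public
# transformers API, where the classes above correspond to YolosConfig and
# YolosOnnxConfig):
#
#     from transformers import YolosConfig
#
#     config = YolosConfig(num_detection_tokens=100)
#     assert config.model_type == "yolos"
#     assert config.num_detection_tokens == 100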
| 29 | 1 |
'''simple docstring'''
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError('Input value must be a \'int\' type')
    if number < 0:
        raise ValueError('Input value must be a positive integer')
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
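# Worked examples: 36 = 0b100100, so 36 & -36 isolates 0b100 and log2 gives
# bit index 2; likewise 8 = 0b1000 gives index 3 (illustrative checks, not
# part of the original file).
if __name__ == "__main__":
    assert get_index_of_rightmost_set_bit(36) == 2
    assert get_index_of_rightmost_set_bit(8) == 3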
| 319 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''], )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
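# Quick sanity check (illustrative, not from the original script): each
# reverse_* helper above is the exact inverse of its forward counterpart.
if __name__ == "__main__":
    _w = torch.arange(32.0).reshape(2, 16)
    assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(_w)), _w)
    _b = torch.arange(16.0)
    assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(_b)), _b)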
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        '''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
        '''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
        '''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
        '''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''', file_name=model_name)[
        '''state_dict'''
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('''bn''', '''batch_norm''')
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    image = Image.open(requests.get(url, stream=True).raw).convert('''RGB''')

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors='''pt''').pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print('''First values of logits:''', logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]])
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]])
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]])
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]])
    print('''Logits:''', outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_A = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 278 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__A : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
__A : str = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
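# Worked example (illustrative, not from the original file): with the default
# scale factor of 8, a requested 500x500 canvas maps to 64x64 latents, i.e. a
# 512x512 image after the movq decode, because 500 is rounded up to the next
# multiple of 64:
#
#     downscale_height_and_width(500, 500) == (64, 64)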
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
"""simple docstring"""
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()

        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""")

        device = torch.device(F'cuda:{gpu_id}')

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(""">=""", """0.17.0.dev0"""):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""")

        device = torch.device(F'cuda:{gpu_id}')

        if self.device.type != "cpu":
            self.to("""cpu""", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, """_hf_hook"""):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, """_hf_hook""")
                and hasattr(module._hf_hook, """execution_device""")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase )
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]], hint: torch.FloatTensor, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0
if isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase_ : Any = torch.cat(lowerCamelCase , dim=0 )
if isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase_ : Dict = torch.cat(lowerCamelCase , dim=0 )
if isinstance(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase_ : List[Any] = torch.cat(lowerCamelCase , dim=0 )
lowerCAmelCase_ : Optional[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
lowerCAmelCase_ : List[Any] = image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
lowerCAmelCase_ : Any = negative_image_embeds.repeat_interleave(lowerCamelCase , dim=0 )
lowerCAmelCase_ : int = hint.repeat_interleave(lowerCamelCase , dim=0 )
lowerCAmelCase_ : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase )
lowerCAmelCase_ : Tuple = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase )
self.scheduler.set_timesteps(lowerCamelCase , device=lowerCamelCase )
lowerCAmelCase_ : Dict = self.scheduler.timesteps
lowerCAmelCase_ : Dict = self.movq.config.latent_channels
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = downscale_height_and_width(lowerCamelCase , lowerCamelCase , self.movq_scale_factor )
# create initial latent
lowerCAmelCase_ : Any = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase , lowerCamelCase , lowerCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase_ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase_ : int = {"""image_embeds""": image_embeds, """hint""": hint}
lowerCAmelCase_ : Tuple = self.unet(
sample=lowerCamelCase , timestep=lowerCamelCase , encoder_hidden_states=lowerCamelCase , added_cond_kwargs=lowerCamelCase , return_dict=lowerCamelCase , )[0]
if do_classifier_free_guidance:
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowerCAmelCase_, lowerCAmelCase_ : Tuple = noise_pred.chunk(2 )
lowerCAmelCase_, lowerCAmelCase_ : List[str] = variance_pred.chunk(2 )
lowerCAmelCase_ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCAmelCase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCAmelCase_, lowerCAmelCase_ : Any = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase_ : List[str] = self.scheduler.step(
lowerCamelCase , lowerCamelCase , lowerCamelCase , generator=lowerCamelCase , )[0]
# post-processing
lowerCAmelCase_ : str = self.movq.decode(lowerCamelCase , force_not_quantize=lowerCamelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
lowerCAmelCase_ : Tuple = image * 0.5 + 0.5
lowerCAmelCase_ : Optional[int] = image.clamp(0 , 1 )
lowerCAmelCase_ : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase_ : Any = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase )
| 89 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("""Loading config file...""")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, """r""") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("""imagenet1k_"""):
        config.num_labels = 1000
        if int(task_name.strip().split("""_""")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_"""):
        config.num_labels = 21000
        if int(task_name.strip().split("""_""")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_"""):
        config.num_labels = 151
        config.image_size = 512
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_"""):
        config.num_labels = 21
        config.image_size = 512
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, """model.classification.name""", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, """model.classification.mitv2.width_multiplier""", 1.0)
    assert (
        getattr(orig_config, """model.classification.mitv2.attn_norm_layer""", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, """model.classification.activation.name""", """swish""")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, """model.segmentation.output_stride""", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, """model.segmentation.deeplabv3.aspp_rates""", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, """model.segmentation.deeplabv3.aspp_out_channels""", 512)
            config.aspp_dropout_prob = getattr(orig_config, """model.segmentation.deeplabv3.aspp_dropout""", 0.1)

    # id2label
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys ( state_dict , base_model=False ):
'''simple docstring'''
if base_model:
model_prefix = """"""
else:
model_prefix = """mobilevitv2."""
rename_keys = []
for k in state_dict.keys():
if k[:8] == "encoder.":
k_new = k[8:]
else:
k_new = k
if ".block." in k:
k_new = k_new.replace(""".block.""" , """.""" )
if ".conv." in k:
k_new = k_new.replace(""".conv.""" , """.convolution.""" )
if ".norm." in k:
k_new = k_new.replace(""".norm.""" , """.normalization.""" )
if "conv_1." in k:
k_new = k_new.replace("""conv_1.""" , f'{model_prefix}conv_stem.' )
for i in [1, 2]:
if f'layer_{i}.' in k:
k_new = k_new.replace(f'layer_{i}.' , f'{model_prefix}encoder.layer.{i-1}.layer.' )
if ".exp_1x1." in k:
k_new = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
if ".red_1x1." in k:
k_new = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
for i in [3, 4, 5]:
if f'layer_{i}.0.' in k:
k_new = k_new.replace(f'layer_{i}.0.' , f'{model_prefix}encoder.layer.{i-1}.downsampling_layer.' )
if f'layer_{i}.1.local_rep.0.' in k:
k_new = k_new.replace(f'layer_{i}.1.local_rep.0.' , f'{model_prefix}encoder.layer.{i-1}.conv_kxk.' )
if f'layer_{i}.1.local_rep.1.' in k:
k_new = k_new.replace(f'layer_{i}.1.local_rep.1.' , f'{model_prefix}encoder.layer.{i-1}.conv_1x1.' )
for i in [3, 4, 5]:
if i == 3:
j_in = [0, 1]
elif i == 4:
j_in = [0, 1, 2, 3]
elif i == 5:
j_in = [0, 1, 2]
for j in j_in:
if f'layer_{i}.1.global_rep.{j}.' in k:
k_new = k_new.replace(
f'layer_{i}.1.global_rep.{j}.' , f'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.' )
if f'layer_{i}.1.global_rep.{j+1}.' in k:
k_new = k_new.replace(
f'layer_{i}.1.global_rep.{j+1}.' , f'{model_prefix}encoder.layer.{i-1}.layernorm.' )
if f'layer_{i}.1.conv_proj.' in k:
k_new = k_new.replace(f'layer_{i}.1.conv_proj.' , f'{model_prefix}encoder.layer.{i-1}.conv_projection.' )
if "pre_norm_attn.0." in k:
k_new = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
if "pre_norm_attn.1." in k:
k_new = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
if "pre_norm_ffn.0." in k:
k_new = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
if "pre_norm_ffn.1." in k:
k_new = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
if "pre_norm_ffn.3." in k:
k_new = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
if "classifier.1." in k:
k_new = k_new.replace("""classifier.1.""" , """classifier.""" )
if "seg_head." in k:
k_new = k_new.replace("""seg_head.""" , """segmentation_head.""" )
if ".aspp_layer." in k:
k_new = k_new.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in k:
k_new = k_new.replace(""".aspp_pool.""" , """.""" )
rename_keys.append((k, k_new) )
return rename_keys
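# Illustrative walk-through of one mapping produced above (an added note; the key is a
# made-up but representative checkpoint entry, assuming base_model=False):
#   "layer_3.1.local_rep.0.block.conv.weight"
#     -> ".block." dropped, ".conv." -> ".convolution.", then the local_rep rule fires:
#   "mobilevitv2.encoder.layer.2.conv_kxk.convolution.weight"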
def remove_unused_keys ( state_dict ):
'''simple docstring'''
keys_to_ignore = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
keys_to_ignore.append(k )
for k in keys_to_ignore:
state_dict.pop(k , None )
def prepare_img ( ):
'''simple docstring'''
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_mobilevitva_checkpoint ( task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
'''simple docstring'''
config = get_mobilevitva_config(task_name , orig_config_path )
# load original state_dict
checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
model = MobileViTVaForSemanticSegmentation(config ).eval()
base_model = False
else:
model = MobileViTVaForImageClassification(config ).eval()
base_model = False
# remove and rename some keys of the loaded original model
state_dict = checkpoint
remove_unused_keys(state_dict )
rename_keys = create_rename_keys(state_dict , base_model=base_model )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(state_dict , rename_key_src , rename_key_dest )
# load modified state_dict
model.load_state_dict(state_dict )
# Check outputs on an image, prepared by MobileViTImageProcessor
image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
outputs = model(**encoding )
# verify classification model
if task_name.startswith("""imagenet""" ):
logits = outputs.logits
predicted_class_idx = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
expected_logits = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] )
assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f'Saving model {task_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(pytorch_dump_folder_path )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
__A : Union[str, Any] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
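# Hypothetical invocation (added for illustration; the script name and file paths are
# placeholders, only the flags above are real):
#   python convert_mobilevitv2.py --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2-1.0.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf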
| 89 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory ( *objects ):
'''simple docstring'''
if not isinstance(objects , list ):
objects = list(objects )
for i in range(len(objects ) ):
objects[i] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def should_reduce_batch_size ( exception ):
'''simple docstring'''
_statements = [
'''CUDA out of memory.''', # CUDA OOM
'''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''', # CUDNN SNAFU
'''DefaultCPUAllocator: can\'t allocate memory''', # CPU OOM
]
if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def find_executable_batch_size ( function = None , starting_batch_size = 128 ):
'''simple docstring'''
if function is None:
return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
batch_size = starting_batch_size
def decorator(*args , **kwargs ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
params = list(inspect.signature(function ).parameters.keys() )
# Guard against user error
if len(params ) < (len(args ) + 1):
arg_str = ''', '''.join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"""Batch size was passed into `{function.__name__}` as the first argument when called. """
F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError('''No executable batch size found, reached zero.''' )
try:
return function(batch_size , *args , **kwargs )
except Exception as e:
if should_reduce_batch_size(e ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
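# Illustrative usage of the decorator above (an addition to this write-up, not part of
# the original module; `toy_step` and every literal below are made-up). The wrapped
# function must accept the batch size as its FIRST argument: the decorator injects it
# and halves it whenever `should_reduce_batch_size` recognizes an OOM-style error.
if __name__ == "__main__":

    @find_executable_batch_size(starting_batch_size=64)
    def toy_step(batch_size, n_samples):
        # simulate running out of memory until the batch size is small enough
        if batch_size > 16:
            raise RuntimeError("CUDA out of memory.")
        return n_samples // batch_size

    print(toy_step(128))  # called WITHOUT batch_size; prints 8 after 64 -> 32 -> 16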
| 207 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase, '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowerCamelCase, '''num_attention_heads''' ) )
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple, lowerCamelCase : str, lowerCamelCase : str=13, lowerCamelCase : Union[str, Any]=64, lowerCamelCase : str=3, lowerCamelCase : int=3, lowerCamelCase : Dict=2, lowerCamelCase : int=1, lowerCamelCase : Optional[Any]=16, lowerCamelCase : Dict=[128, 256, 384], lowerCamelCase : Tuple=[4, 6, 8], lowerCamelCase : Optional[Any]=[2, 3, 4], lowerCamelCase : str=[16, 16, 16], lowerCamelCase : Dict=0, lowerCamelCase : List[str]=[2, 2, 2], lowerCamelCase : str=[2, 2, 2], lowerCamelCase : List[Any]=0.02, lowerCamelCase : Any=True, lowerCamelCase : Tuple=True, lowerCamelCase : Optional[Any]=2, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = kernel_size
lowercase__ = stride
lowercase__ = padding
lowercase__ = hidden_sizes
lowercase__ = num_attention_heads
lowercase__ = depths
lowercase__ = key_dim
lowercase__ = drop_path_rate
lowercase__ = patch_size
lowercase__ = attention_ratio
lowercase__ = mlp_ratio
lowercase__ = initializer_range
lowercase__ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = num_labels
lowercase__ = initializer_range
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = LevitModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase )
lowercase__ = (self.image_size, self.image_size)
lowercase__ , lowercase__ = image_size[0], image_size[1]
for _ in range(4 ):
lowercase__ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowercase__ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), )
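# Worked example of the shape arithmetic above (an added note, using this tester's
# defaults image_size=64, kernel_size=3, stride=2, padding=1):
# floor((64 + 2*1 - 3)/2 + 1) = 32, so four stride-2 convs shrink each side
# 64 -> 32 -> 16 -> 8 -> 4.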
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : List[Any], lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = LevitForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase__ = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = LevitModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def lowercase__ ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def lowercase__ ( self : Dict ):
'''simple docstring'''
pass
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : Tuple ):
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
lowercase__ = (self.model_tester.image_size, self.model_tester.image_size)
lowercase__ , lowercase__ = image_size[0], image_size[1]
for _ in range(4 ):
lowercase__ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowercase__ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [
height * width,
self.model_tester.hidden_sizes[0],
], )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : Any, lowerCamelCase : Any=False ):
'''simple docstring'''
lowercase__ = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
lowercase__ = model(**lowerCamelCase ).loss
loss.backward()
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ = False
lowercase__ = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase__ = model_class(lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
lowercase__ = model(**lowerCamelCase ).loss
loss.backward()
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
lowercase__ = problem_type['''title''']
lowercase__ = problem_type['''num_labels''']
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
if problem_type["num_labels"] > 1:
lowercase__ = inputs['''labels'''].unsqueeze(1 ).repeat(1, problem_type['''num_labels'''] )
lowercase__ = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase ) as warning_list:
lowercase__ = model(**lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = LevitModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def a ( ):
'''simple docstring'''
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self : int ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowercase__ = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) )
| 207 | 1 |
"""simple docstring"""
# A bipartite graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) connects a vertex from U to a vertex from V.
# In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. Equivalently, there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs ( graph ) -> bool:
visited = [False] * len(graph )
color = [-1] * len(graph )
def dfs(v , c ):
visited[v] = True
color[v] = c
for u in graph[v]:
if not visited[u]:
dfs(u , 1 - c )
for i in range(len(graph ) ):
if not visited[i]:
dfs(i , 0 )
for i in range(len(graph ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
lowercase__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
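# An added counterexample (illustrative, not in the original file): a triangle contains
# an odd cycle, so no 2-coloring exists and the DFS check reports False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle)) # False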
| 203 | """simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = PriorTransformer
a__ = """hidden_states"""
@property
def dummy_input( self) -> Tuple:
'''simple docstring'''
batch_size = 4
embedding_dim = 8
num_embeddings = 7
hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def get_dummy_seed_input( self , seed=0) -> str:
'''simple docstring'''
torch.manual_seed(seed)
batch_size = 4
embedding_dim = 8
num_embeddings = 7
hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
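# For reference (an added note, derived from the literals above): with batch_size=4,
# embedding_dim=8 and num_embeddings=7, `hidden_states` and `proj_embedding` are
# (4, 8) tensors and `encoder_hidden_states` is (4, 7, 8).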
@property
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return (4, 8)
@property
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
return (4, 8)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: int = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
a__: Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__ , a__: Union[str, Any] = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=lowercase)
self.assertIsNotNone(lowercase)
self.assertEqual(len(loading_info['missing_keys']) , 0)
model.to(lowercase)
a__: Any = model(**self.dummy_input)[0]
assert hidden_states is not None, "Make sure output is not None"
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__ , a__: Tuple = self.prepare_init_args_and_inputs_for_common()
a__: Any = self.model_class(**lowercase)
a__: str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__: Tuple = [*signature.parameters.keys()]
a__: List[Any] = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , lowercase)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: str = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy')
a__: str = model.to(lowercase)
if hasattr(lowercase , 'set_default_attn_processor'):
model.set_default_attn_processor()
a__: Dict = self.get_dummy_seed_input()
with torch.no_grad():
a__: str = model(**lowercase)[0]
a__: str = output[0, :5].flatten().cpu()
print(lowercase)
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
a__: Any = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
self.assertTrue(torch_all_close(lowercase , lowercase , rtol=1e-2))
@slow
class __snake_case ( unittest.TestCase ):
def get_dummy_seed_input( self , batch_size=1 , embedding_dim=7_68 , num_embeddings=77 , seed=0) -> int:
'''simple docstring'''
torch.manual_seed(seed)
hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
])
def lowerCamelCase_ ( self , lowercase , lowercase) -> str:
'''simple docstring'''
a__: Tuple = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior')
model.to(lowercase)
a__: Optional[Any] = self.get_dummy_seed_input(seed=lowercase)
with torch.no_grad():
a__: Optional[int] = model(**lowercase)[0]
assert list(sample.shape) == [1, 7_68]
a__: List[str] = sample[0, :8].flatten().cpu()
print(lowercase)
a__: Union[str, Any] = torch.tensor(lowercase)
assert torch_all_close(lowercase , lowercase , atol=1e-3)
| 203 | 1 |
import math
from datetime import datetime, timedelta
def gauss_easter ( year: int ) -> datetime:
"""simple docstring"""
metonic_cycle = year % 19
julian_leap_year = year % 4
non_leap_year = year % 7
leap_day_inhibits = math.floor(year / 1_00 )
lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
leap_day_reinstall_number = leap_day_inhibits / 4
secular_moon_shift = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
days_from_phm_to_sunday = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(year , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(year , 4 , 18 )
else:
return datetime(year , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
__lowerCamelCase : List[str] = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 219 |
'''simple docstring'''
from __future__ import annotations
def merge ( input_list: list , low: int , mid: int , high: int ) -> list:
result = []
left , right = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
input_list[low : high + 1] = result + left + right
return input_list
def iter_merge_sort ( input_list: list ) -> list:
if len(input_list ) <= 1:
return input_list
input_list = list(input_list )
# iteration for two-way merging
p = 2
while p <= len(input_list ):
# get the low, high and middle values for merge-sorting a single sublist
for i in range(0 , len(input_list ) , p ):
low = i
high = i + p - 1
mid = (low + high + 1) // 2
input_list = merge(input_list , low , mid , high )
# final merge of last two parts
if p * 2 >= len(input_list ):
mid = i
input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
_UpperCamelCase : str = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
_UpperCamelCase : List[str] = []
else:
_UpperCamelCase : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
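# An added, non-interactive example (illustrative): the merge widths double each pass
# (2, 4, 8, ...) and the final merge stitches the leftover unequal parts together.
assert iter_merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]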
| 304 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor ( shape , vocab_size , rng=None ):
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims ):
values.append(rng.randint(0 , vocab_size - 1 ) )
output = np.array(values , dtype=jnp.int32 ).reshape(shape )
return output
def random_attention_mask ( shape , rng=None ):
attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
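# Illustrative usage of the helpers above (an addition; shapes and vocab size are
# arbitrary, and running this directly requires the flax dependencies imported above):
if __name__ == "__main__":
    demo_ids = ids_tensor((2, 5), vocab_size=100)  # (2, 5) int32 ids in [0, 99]
    demo_mask = random_attention_mask((2, 5))  # 0/1 mask, last column forced to 1
    assert demo_ids.shape == (2, 5) and bool(demo_mask[:, -1].all())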
@require_flax
class lowercase__ :
_UpperCAmelCase :Dict = None
_UpperCAmelCase :Dict = ()
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowerCamelCase_ : Union[str, Any] =2
lowerCamelCase_ : int =inputs["input_ids"].shape[-1] // 2
lowerCamelCase_ : List[str] =inputs["input_ids"][:max_batch_size, :sequence_length]
lowerCamelCase_ : Tuple =jnp.ones_like(snake_case__ )
lowerCamelCase_ : Union[str, Any] =attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowerCamelCase_ : int =input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowerCamelCase_ : List[Any] =config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : List[str] =self._get_input_ids_and_config()
lowerCamelCase_ : Union[str, Any] =False
lowerCamelCase_ : Union[str, Any] =max_length
lowerCamelCase_ : Dict =0
for model_class in self.all_generative_model_classes:
lowerCamelCase_ : Optional[Any] =model_class(snake_case__ )
lowerCamelCase_ : int =model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase_ : Union[str, Any] =getattr(snake_case__ , snake_case__ )
lowerCamelCase_ : Any =pt_model_class(snake_case__ ).eval()
lowerCamelCase_ : Tuple =load_flax_weights_in_pytorch_model(snake_case__ , flax_model.params )
lowerCamelCase_ : Optional[int] =flax_model.generate(snake_case__ ).sequences
lowerCamelCase_ : List[Any] =pt_model.generate(torch.tensor(snake_case__ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowerCamelCase_ : int =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : List[Any] =self._get_input_ids_and_config()
lowerCamelCase_ : List[str] =False
lowerCamelCase_ : int =max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase_ : Any =model_class(snake_case__ )
lowerCamelCase_ : Union[str, Any] =model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
lowerCamelCase_ : List[str] =jit(model.generate )
lowerCamelCase_ : Optional[Any] =jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : Optional[int] =self._get_input_ids_and_config()
lowerCamelCase_ : int =True
lowerCamelCase_ : List[str] =max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase_ : Any =model_class(snake_case__ )
lowerCamelCase_ : Optional[Any] =model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
lowerCamelCase_ : int =jit(model.generate )
lowerCamelCase_ : int =jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : Tuple =self._get_input_ids_and_config()
lowerCamelCase_ : Any =False
lowerCamelCase_ : List[str] =max_length
lowerCamelCase_ : List[Any] =2
for model_class in self.all_generative_model_classes:
lowerCamelCase_ : Dict =model_class(snake_case__ )
lowerCamelCase_ : Optional[Any] =model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
lowerCamelCase_ : Tuple =jit(model.generate )
lowerCamelCase_ : Optional[Any] =jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : Dict =self._get_input_ids_and_config()
lowerCamelCase_ : Any =False
lowerCamelCase_ : Any =max_length
lowerCamelCase_ : int =2
lowerCamelCase_ : Tuple =2
for model_class in self.all_generative_model_classes:
lowerCamelCase_ : Any =model_class(snake_case__ )
lowerCamelCase_ : int =model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase__ ( self : Tuple ):
lowerCamelCase_ : List[str] =self._get_input_ids_and_config()
lowerCamelCase_ : List[str] =True
lowerCamelCase_ : Union[str, Any] =max_length
lowerCamelCase_ : str =0.8
lowerCamelCase_ : Dict =10
lowerCamelCase_ : Tuple =0.3
lowerCamelCase_ : Optional[Any] =1
lowerCamelCase_ : str =8
lowerCamelCase_ : Tuple =9
for model_class in self.all_generative_model_classes:
lowerCamelCase_ : int =model_class(snake_case__ )
lowerCamelCase_ : int =model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
lowerCamelCase_ : Tuple =jit(model.generate )
lowerCamelCase_ : Optional[Any] =jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : Union[str, Any] =self._get_input_ids_and_config()
lowerCamelCase_ : Dict =max_length
lowerCamelCase_ : List[str] =1
lowerCamelCase_ : str =8
lowerCamelCase_ : Dict =9
for model_class in self.all_generative_model_classes:
lowerCamelCase_ : Optional[Any] =model_class(snake_case__ )
lowerCamelCase_ : Any =model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
lowerCamelCase_ : Optional[int] =jit(model.generate )
lowerCamelCase_ : List[Any] =jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : Any =self._get_input_ids_and_config()
lowerCamelCase_ : int =max_length
lowerCamelCase_ : List[Any] =2
lowerCamelCase_ : List[Any] =1
lowerCamelCase_ : List[str] =8
lowerCamelCase_ : Tuple =9
for model_class in self.all_generative_model_classes:
lowerCamelCase_ : Optional[int] =model_class(snake_case__ )
lowerCamelCase_ : List[Any] =model.generate(snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
lowerCamelCase_ : int =jit(model.generate )
lowerCamelCase_ : Dict =jit_generate(snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Optional[int] ):
lowerCamelCase_ : int =self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase_ : str =attention_mask.at[(0, 0)].set(0 )
lowerCamelCase_ : List[Any] =False
lowerCamelCase_ : List[Any] =max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase_ : str =model_class(snake_case__ )
lowerCamelCase_ : int =model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
lowerCamelCase_ : List[Any] =jit(model.generate )
lowerCamelCase_ : Optional[Any] =jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ : Union[str, Any] =self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase_ : Optional[Any] =attention_mask.at[(0, 0)].set(0 )
lowerCamelCase_ : List[str] =True
lowerCamelCase_ : Tuple =max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase_ : List[str] =model_class(snake_case__ )
lowerCamelCase_ : int =model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
lowerCamelCase_ : Any =jit(model.generate )
lowerCamelCase_ : Dict =jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ : Optional[int] =self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase_ : Tuple =attention_mask.at[(0, 0)].set(0 )
lowerCamelCase_ : Optional[Any] =2
lowerCamelCase_ : Any =max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase_ : Tuple =model_class(snake_case__ )
lowerCamelCase_ : List[str] =model.generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertEqual(generation_outputs.shape[-1] , snake_case__ )
lowerCamelCase_ : List[Any] =jit(model.generate )
lowerCamelCase_ : Any =jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[Any] ):
lowerCamelCase_ : Union[str, Any] =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
lowerCamelCase_ : Optional[int] =FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
lowerCamelCase_ : Dict ="Hello world"
lowerCamelCase_ : Optional[int] =tokenizer(snake_case__ , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(snake_case__ , "do_samples" ):
model.generate(snake_case__ , do_samples=snake_case__ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(snake_case__ , "foo" ):
lowerCamelCase_ : int ={"foo": "bar"}
model.generate(snake_case__ , **snake_case__ )
| 365 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
A__ : str = logging.get_logger(__name__)
class lowercase__ ( snake_case__ ):
def __init__( self : Optional[Any] , *snake_case__ : int , **snake_case__ : Any ):
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 209 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize ( example ):
'''simple docstring'''
output = {}
output['input_ids'] = tokenizer(example['content'] , truncation=False )['input_ids']
output['ratio_char_token'] = len(example['content'] ) / len(output['input_ids'] )
return output
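# Rough intuition for the ratio computed above (an added note with made-up numbers):
# a 1,000-character file that tokenizes to 250 ids has ratio_char_token = 4.0; larger
# ratios mean the tokenizer compresses this corpus more efficiently.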
__UpperCAmelCase = HfArgumentParser(PretokenizationArguments)
__UpperCAmelCase = parser.parse_args()
if args.num_workers is None:
__UpperCAmelCase = multiprocessing.cpu_count()
__UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__UpperCAmelCase = time.time()
__UpperCAmelCase = load_dataset(args.dataset_name, split='train')
print(F'Dataset loaded in {time.time()-t_start:.2f}s')
__UpperCAmelCase = time.time()
__UpperCAmelCase = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
__UpperCAmelCase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
| 29 |
def is_palindrome ( head ):
'''simple docstring'''
if not head:
return True
# split the list into two parts
fast , slow = head.next, head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
second = slow.next
slow.next = None # detach the first half (the check works even without this)
# reverse the second part
node = None
while second:
nxt = second.next
second.next = node
node = second
second = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
node = node.next
head = head.next
return True
def is_palindrome_stack ( head ):
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
slow = fast = cur = head
while fast and fast.next:
fast , slow = fast.next.next, slow.next
# 2. Push the second half into the stack
stack = [slow.val]
while slow.next:
slow = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
cur = cur.next
return True
def is_palindrome_dict ( head ):
'''simple docstring'''
if not head or not head.next:
return True
d = {}
pos = 0
while head:
if head.val in d:
d[head.val].append(pos )
else:
d[head.val] = [pos]
head = head.next
pos += 1
checksum = pos - 1
middle = 0
for v in d.values():
if len(v ) % 2 != 0:
middle += 1
else:
step = 0
for i in range(0 , len(v ) ):
if v[i] + v[len(v ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
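# The checks above assume a singly linked node type that this file never defines; the
# minimal `Node` class and `build` helper below are illustrative assumptions added so
# the three variants can be exercised end to end.
class Node:
    def __init__(self, val):
        self.val = val
        self.next = None

def build(values):
    head = None
    for v in reversed(values):
        node = Node(v)
        node.next = head
        head = node
    return head

assert is_palindrome(build([1, 2, 1]))
assert is_palindrome_stack(build([1, 2, 2, 1]))
assert not is_palindrome_dict(build([1, 2, 3]))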
| 29 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase : Dict =logging.get_logger(__name__)
class a_ ( A__ ):
def __init__( self : Optional[int] , *lowercase : Dict , **lowercase : List[str] ):
"""simple docstring"""
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , __A , )
super().__init__(*__A , **__A )
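# Suggested migration (an added note): instantiate the replacement directly, e.g.
#   CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
# instead of constructing the deprecated feature extractor above.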
| 351 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
def __init__( self : int , lowercase : Optional[int] , lowercase : List[Any]=13 , lowercase : Dict=30 , lowercase : Dict=2 , lowercase : Tuple=3 , lowercase : Dict=True , lowercase : Dict=True , lowercase : Tuple=32 , lowercase : List[Any]=2 , lowercase : List[str]=4 , lowercase : List[Any]=37 , lowercase : Union[str, Any]="gelu" , lowercase : List[str]=0.1 , lowercase : List[Any]=0.1 , lowercase : List[str]=10 , lowercase : Any=0.02 , lowercase : Union[str, Any]=3 , lowercase : Tuple=None , lowercase : List[str]=2 , ):
"""simple docstring"""
lowercase_ :Union[str, Any] = parent
lowercase_ :Optional[int] = batch_size
lowercase_ :Tuple = image_size
lowercase_ :Any = patch_size
lowercase_ :List[Any] = num_channels
lowercase_ :Optional[Any] = is_training
lowercase_ :str = use_labels
lowercase_ :Any = hidden_size
lowercase_ :Optional[int] = num_hidden_layers
lowercase_ :List[Any] = num_attention_heads
lowercase_ :str = intermediate_size
lowercase_ :Optional[int] = hidden_act
lowercase_ :List[Any] = hidden_dropout_prob
lowercase_ :Optional[Any] = attention_probs_dropout_prob
lowercase_ :Any = type_sequence_label_size
lowercase_ :Dict = initializer_range
lowercase_ :List[Any] = scope
lowercase_ :List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase_ :str = (image_size // patch_size) ** 2
lowercase_ :Any = num_patches + 2
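# Worked example with this tester's defaults (an added note): image_size=30 and
# patch_size=2 give (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 2 = 227.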
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
lowercase_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ :Optional[Any] = None
if self.use_labels:
lowercase_ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ :Any = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase__ ( self : Tuple , lowercase : int , lowercase : str , lowercase : Tuple ):
"""simple docstring"""
lowercase_ :Optional[int] = TFDeiTModel(config=lowercase )
lowercase_ :str = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : str , lowercase : str , lowercase : Optional[Any] , lowercase : str ):
"""simple docstring"""
lowercase_ :List[str] = TFDeiTForMaskedImageModeling(config=lowercase )
lowercase_ :Optional[int] = model(lowercase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ :List[str] = 1
lowercase_ :List[str] = TFDeiTForMaskedImageModeling(lowercase )
lowercase_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ :List[str] = model(lowercase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase__ ( self : List[str] , lowercase : Any , lowercase : Union[str, Any] , lowercase : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Tuple = self.type_sequence_label_size
lowercase_ :str = TFDeiTForImageClassification(lowercase )
lowercase_ :str = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ :Optional[int] = 1
lowercase_ :Optional[int] = TFDeiTForImageClassification(lowercase )
lowercase_ :List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ :int = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Dict ):
"""simple docstring"""
lowercase_ :Tuple = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ :Tuple = config_and_inputs
lowercase_ :Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class a_ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__A = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
__A = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
lowercase_ :List[Any] = TFDeiTModelTester(self )
lowercase_ :List[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def lowercase__ ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def lowercase__ ( self : Optional[int] ):
"""simple docstring"""
pass
def lowercase__ ( self : Tuple ):
"""simple docstring"""
lowercase_ , lowercase_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ :Optional[Any] = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase_ :Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , tf.keras.layers.Dense ) )
def lowercase__ ( self : Any ):
"""simple docstring"""
lowercase_ , lowercase_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ :Tuple = model_class(lowercase )
lowercase_ :Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ :Tuple = [*signature.parameters.keys()]
lowercase_ :Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase )
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def lowercase__ ( self : str ):
"""simple docstring"""
lowercase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase )
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
def lowercase__ ( self : List[Any] , lowercase : Optional[Any] , lowercase : List[str] , lowercase : Any=False ):
"""simple docstring"""
lowercase_ :Dict = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowercase__ ( self : List[Any] ):
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :List[Any] = TFDeiTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def UpperCAmelCase_ ( ):
lowercase_ :List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Dict ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
lowercase_ :Optional[int] = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
lowercase_ :Optional[int] = self.default_image_processor
lowercase_ :Union[str, Any] = prepare_img()
lowercase_ :Dict = image_processor(images=lowercase , return_tensors="tf" )
# forward pass
lowercase_ :Union[str, Any] = model(**lowercase )
# verify the logits
lowercase_ :Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase )
lowercase_ :Dict = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4 ) )
| 147 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that go through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate')

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('on_predict')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step')
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the logging level. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader()) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('on_epoch_begin')
            for _ in range(train_dl_len):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
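    # Illustrative trace (added note, not part of the original test file): with num_train_epochs=1,
    # two training steps, logging_steps=1, and no step-based eval/save triggered, the method above
    # would return roughly:
    #   ["on_init_end", "on_train_begin", "on_epoch_begin",
    #    "on_step_begin", "on_step_end", "on_log",
    #    "on_step_begin", "on_step_end", "on_log",
    #    "on_epoch_end", "on_log", "on_train_end"]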
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls whether we use the ProgressCallback or the PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore', category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='epoch')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy='steps',
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning') as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
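# Note (added): this is the standard transformers lazy-import pattern. Importing, e.g., SwinModel
# from this package only loads modeling_swin (and its torch dependency) on first attribute access,
# which keeps `import transformers` cheap when the heavy backends are not needed.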
| 89 | 1 |
"""simple docstring"""
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
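# Sanity check (added note): solution(10) should return 27, since 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27. The default num=100 corresponds to Project Euler problem 20.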
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 361 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
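    # Worked trace (added note): capacity 4; refer('A') -> [A]; refer(2) -> [2, A]; refer(3) -> [3, 2, A];
    # refer('A') moves it to the front -> [A, 3, 2]; refer(4) -> [4, A, 3, 2]; refer(5) evicts the least
    # recently used key (2) -> [5, 4, 'A', 3], matching the assertion above.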
| 86 | 0 |
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
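# Illustrative example (added note): for [1, 2, 3, 4] the average is 2.5, so the mean absolute
# deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.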
| 203 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
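# Illustrative invocation (added note; paths are placeholders, flags are the ones registered in main() below,
# and the alpha/mlm values are chosen to satisfy sanity_checks):
#   python train.py --student_type distilbert --student_config student_config.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0 \
#       --dump_path serialization_dir/my_run --data_file data/binarized_text.pickle \
#       --token_counts data/token_counts.pickle --force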
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=lowercase , required=lowercase , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=lowercase , required=lowercase , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=lowercase , choices=["distilbert", "roberta", "gpt2"] , required=lowercase , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=lowercase , required=lowercase , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=lowercase , type=lowercase , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=lowercase , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=lowercase , required=lowercase , help="The teacher model." )
parser.add_argument("--temperature" , default=2.0 , type=lowercase , help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=lowercase , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=lowercase , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=lowercase , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=lowercase , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=lowercase , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=lowercase , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=lowercase , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=lowercase , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=lowercase , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=lowercase , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=lowercase , help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=lowercase , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=lowercase , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=lowercase , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=lowercase , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=lowercase , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5e-4 , type=lowercase , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1e-6 , type=lowercase , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=lowercase , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=lowercase , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=lowercase , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=lowercase , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=lowercase , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=lowercase , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=lowercase , default=500 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=lowercase , default=4000 , help="Checkpoint interval." )
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'
                    " it. Use `--force` if you want to overwrite it." )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(F'Param: {args}' )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
        json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    _, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F'Special tokens {special_tok_ids}')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'Loading data from {args.data_file}' )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(F'Loading token counts from {args.token_counts} (already pre-computed)')
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info("Data loader created." )
# STUDENT #
logger.info(F'Loading student config from {args.student_config}' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(F'Loading pretrained weights from {args.student_pretrained_weights}')
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(F'cuda:{args.local_rank}')
    logger.info("Student loaded.")
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(F'cuda:{args.local_rank}' )
logger.info(F'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
| 203 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
train_parser.add_argument(
"--train_data" , type=A_ , required=A_ , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=A_ , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=A_ , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=A_ , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=A_ , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=A_ , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=A_ , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=A_ , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=A_ , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=A_ , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=A_ , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=A_ , default=3e-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=A_ , default=1e-08 , help="Epsilon for Adam optimizer." )
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f'Loading {args.task} pipeline for {args.model}')
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f'Loading dataset from {args.train_data}')
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f'Loading validation dataset from {args.validation_data}')
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
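# Illustrative usage (added note; file paths are placeholders, flags are the ones registered above):
#   transformers-cli train --train_data ./train.csv --validation_data ./valid.csv \
#       --task text_classification --model bert-base-uncased --output ./trained_model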
| 189 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
PATTERNS = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
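# Worked example (added note): rename_state_dict_key("encoder.attention.q_lin.weight") becomes
# "encoder.attn.q_proj.weight" after the PATTERNS pass, then the encoder branch rewrites
# ".attn" to ".self_attn", giving "encoder.self_attn.q_proj.weight".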
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 189 | 1 |
"""simple docstring"""
def or_gate(input_a: int, input_b: int) -> int:
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
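# Equivalent formulation (added note): int(bool(input_a) or bool(input_b)); the tuple-count trick
# above simply checks whether at least one of the two inputs is 1.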
| 109 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
'''simple docstring'''
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs=None,
        **kwargs,
    ):
'''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get('''name_or_path''')
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;'''
                ''' if you are testing the model, this can safely be ignored''' )
            name_or_path = '''None'''

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '''<|endoftext|>''' if eos_token is None else eos_token
        unk_token = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '''<pad>''' if pad_token is None else pad_token
            bos_token = '''<s>''' if bos_token is None else bos_token
super().__init__(
do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
# Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F'[{"".join(map(chr , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model )
    def preprocess_text(self, text: str) -> str:
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub('''''', text)

        # Normalize whitespaces
        text = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize('''NFC''', text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to skip the default clean-up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids
    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(all_responses) + F'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt)
return self.encode(text=__lowerCAmelCase )
| 209 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase__ ( lowercase ):
@staticmethod
@abstractmethod
def UpperCamelCase_ ( lowerCamelCase__ : ArgumentParser ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
raise NotImplementedError()
| 236 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 236 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_base_extractors(
    compression_format: str,
    is_archive: bool,
    bza_file: str,
    gz_file: str,
    lza_file: str,
    seven_zip_file: str,
    tar_file: str,
    xz_file: str,
    zip_file: str,
    zstd_file: str,
    text_file: str,
    tmp_path,
):
    input_paths_and_base_extractors = {
        """7z""": (seven_zip_file, SevenZipExtractor),
        """bz2""": (bza_file, BzipaExtractor),
        """gzip""": (gz_file, GzipExtractor),
        """lz4""": (lza_file, LzaExtractor),
        """tar""": (tar_file, TarExtractor),
        """xz""": (xz_file, XzExtractor),
        """zip""": (zip_file, ZipExtractor),
        """zstd""": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = F'for \'{compression_format}\' compression_format, '
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="""utf-8""")
    else:
        extracted_file_content = output_path.read_text(encoding="""utf-8""")
    expected_file_content = text_file.read_text(encoding="""utf-8""")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor(
    compression_format: str,
    is_archive: bool,
    bza_file: str,
    gz_file: str,
    lza_file: str,
    seven_zip_file: str,
    tar_file: str,
    xz_file: str,
    zip_file: str,
    zstd_file: str,
    text_file: str,
    tmp_path,
):
    input_paths = {
        """7z""": seven_zip_file,
        """bz2""": bza_file,
        """gzip""": gz_file,
        """lz4""": lza_file,
        """tar""": tar_file,
        """xz""": xz_file,
        """zip""": zip_file,
        """zstd""": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = F'for \'{compression_format}\' compression_format, '
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="""utf-8""")
    else:
        extracted_file_content = output_path.read_text(encoding="""utf-8""")
    expected_file_content = text_file.read_text(encoding="""utf-8""")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / """data_dot_dot"""
    directory.mkdir()
    path = directory / """tar_file_with_dot_dot.tar"""
    with tarfile.TarFile(path, """w""") as f:
        f.add(text_file, arcname=os.path.join("""..""", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / """data_sym_link"""
    directory.mkdir()
    path = directory / """tar_file_with_sym_link.tar"""
    os.symlink("""..""", directory / """subdir""", target_is_directory=True)
    with tarfile.TarFile(path, """w""") as f:
        f.add(str(directory / """subdir"""), arcname="""subdir""")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def test_tar_extract_insecure_files(
    insecure_tar_file: str, error_log: str, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        """tar_file_with_dot_dot""": tar_file_with_dot_dot,
        """tar_file_with_sym_link""": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / """extracted"""
    TarExtractor.extract(insecure_tar_file_path, output_path)
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    not_a_zip_file = tmpdir / """not_a_zip_file"""
    # From: https://github.com/python/cpython/pull/5053
    data = (
B"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
B"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
B"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
B"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
    with not_a_zip_file.open("""wb""") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
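# Note (added): zipfile.is_zipfile scans for the end-of-central-directory signature (b"PK\x05\x06"),
# which the crafted PNG payload above happens to contain, while the extractor checks the leading
# magic number and correctly rejects the file.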
| 147 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, """words.txt""")

    words = """"""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip("""\"""") for word in words.strip("""\r\n""").split(""",""")]
    words = [
        word
        for word in [sum(ord(x) - 6_4 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
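# Worked example (added note): "SKY" has word value 19 + 11 + 25 = 55 = t_10, so it counts as a
# triangle word; this script counts such words for Project Euler problem 42.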
| 147 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = DatasetDict(
        {
            """train""": dataset["""train"""].select(train_idxs),
            """validation""": dataset["""train"""].select(valid_idxs),
            """test""": dataset["""validation"""],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="""longest""", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="""pt""",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("""glue""", """mrpc""")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    metric = evaluate.load("""glue""", """mrpc""")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["""train"""].num_rows), datasets["""train"""]["""label"""])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=1_00, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
                metric.add_batch(
                    predictions=predictions, references=references,)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''', eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("""Average test metrics from all folds:""", test_metric)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
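# A minimal, self-contained sketch of the fold-creation step above in isolation
# (a hedged example: the toy labels and split count are illustrative, and
# scikit-learn is assumed to be installed):
#
#     import numpy as np
#     from sklearn.model_selection import StratifiedKFold
#
#     toy_labels = np.array([0, 1, 0, 1, 0, 1])  # stand-in for datasets["train"]["label"]
#     toy_kfold = StratifiedKFold(n_splits=3)
#     for fold, (train_idxs, valid_idxs) in enumerate(toy_kfold.split(np.zeros(len(toy_labels)), toy_labels)):
#         # each fold keeps the class ratio of the labels in both index sets
#         print(f"fold {fold}: train={train_idxs}, valid={valid_idxs}")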
| 365 | """simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ):
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ):
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
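# A short usage sketch of the composite-config pattern above (a hedged example:
# the encoder/decoder choices are illustrative, transformers is assumed installed):
#
#     from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig
#
#     encoder_config = ViTConfig()
#     decoder_config = BertConfig()
#     config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
#     # the classmethod flips the decoder into cross-attention mode, as above
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention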
| 30 | 0 |
def jaro_winkler(stra, strb):
    """simple docstring"""

    def get_matched_characters(_stra, _strb) -> str:
        matched = []
        limit = min(len(_stra), len(_strb)) // 2
        for i, l in enumerate(_stra):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_strb)))
            if l in _strb[left:right]:
                matched.append(l)
                _strb = f"{_strb[0:_strb.index(l)]} {_strb[_strb.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_a = get_matched_characters(stra, strb)
    matching_b = get_matched_characters(strb, stra)
    match_count = len(matching_a)

    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a, matching_b) if ca != cb]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra)
                + match_count / len(strb)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4], strb[:4]):
        if ca == cb:
            prefix_len += 1
        else:
            break
return jaro + 0.1 * prefix_len * (1 - jaro)
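# For reference, the quantities computed above combine into the standard
# Jaro / Jaro-Winkler definitions (m = matches, t = transpositions, l = common
# prefix length capped at 4, p = 0.1):
#     jaro = (1/3) * (m / |s1| + m / |s2| + (m - t) / m)
#     jaro_winkler = jaro + l * p * (1 - jaro)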
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world""")) | 210 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__ ( enum.Enum):
A_ : List[Any] = 0
A_ : Dict = 1
A_ : Union[str, Any] = 2
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCAmelCase : Any = None
if self.model.config.prefix is not None:
__lowerCAmelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCAmelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params )
__lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params}
__lowerCAmelCase : List[str] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = {}
if prefix is not None:
__lowerCAmelCase : Union[str, Any] = prefix
if prefix:
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
__lowerCAmelCase : int = handle_long_generation
preprocess_params.update(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = generate_kwargs
__lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCAmelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowerCAmelCase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        ' model\'s max length' )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
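    # (Context for the "hole" branch above: it trims the prompt to its most recent
    # `keep_length` tokens so that prompt plus newly generated tokens still fit in
    # `tokenizer.model_max_length`.)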
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_inputs['input_ids']
__lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = 1
else:
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
        # the generated sequences have shape (batch_size, sequence_length)
__lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Any = model_outputs['generated_sequence'][0]
__lowerCAmelCase : Tuple = model_outputs['input_ids']
__lowerCAmelCase : Any = model_outputs['prompt_text']
__lowerCAmelCase : int = generated_sequence.numpy().tolist()
__lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase : int = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCAmelCase : Any = self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__lowerCAmelCase : Optional[Any] = 0
else:
__lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCAmelCase : int = text[prompt_length:]
__lowerCAmelCase : Dict = {'generated_text': all_text}
records.append(_SCREAMING_SNAKE_CASE )
return records | 86 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
| 316 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
| 316 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class __a ( datasets.BeamBasedBuilder ):
def __lowercase ( self : List[str] ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=SCREAMING_SNAKE_CASE , )
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(SCREAMING_SNAKE_CASE )
class __a ( datasets.BeamBasedBuilder ):
def __lowercase ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=SCREAMING_SNAKE_CASE , )
def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def __lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class __a ( A__ ):
@require_beam
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase__ : List[Any] = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , F'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCamelCase__ : Optional[Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , SCREAMING_SNAKE_CASE )
self.assertEqual(dset["train"].info.splits["train"].num_examples , SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __lowercase ( self : Any ):
'''simple docstring'''
import apache_beam as beam
UpperCamelCase__ : Any = beam.io.parquetio.WriteToParquet
UpperCamelCase__ : Tuple = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase__ : str = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
UpperCamelCase__ : Optional[Any] = partial(SCREAMING_SNAKE_CASE , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , F'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                            SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , F'{builder.name}-train-00001-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCamelCase__ : List[Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , SCREAMING_SNAKE_CASE )
self.assertEqual(dset["train"].info.splits["train"].num_examples , SCREAMING_SNAKE_CASE )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __lowercase ( self : List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase__ : int = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase__ : Any = NestedBeamDataset(cache_dir=SCREAMING_SNAKE_CASE , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , F'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
UpperCamelCase__ : List[Any] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , SCREAMING_SNAKE_CASE )
self.assertEqual(dset["train"].info.splits["train"].num_examples , SCREAMING_SNAKE_CASE )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset | 189 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowerCamelCase : int =TypeVar('''T''')
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """simple docstring"""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """simple docstring"""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """simple docstring"""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        """simple docstring"""
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]" | 189 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Optional[int] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase) -> Tuple:
a = []
for i in range(encoder_config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias'''))
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
])
return rename_keys
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase) -> List[str]:
for i in range(encoder_config.num_hidden_layers):
# queries, keys and values (only weights, no biases)
a = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''')
a = in_proj_weight[
: encoder_config.hidden_size, :
]
a = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
a = in_proj_weight[
-encoder_config.hidden_size :, :
]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase) -> Union[str, Any]:
a = dct.pop(__UpperCamelCase)
a = val
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> List[str]:
if "handwritten" in checkpoint_url:
a = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
a = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
a = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase).raw).convert("RGB")
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase) -> Dict:
a = ViTConfig(image_size=3_84 , qkv_bias=__UpperCamelCase)
a = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
a = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
a = 10_24
a = 40_96
a = 24
a = 16
a = 10_24
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings and no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
a = False
a = "relu"
a = 10_24
a = True
a = False
a = False
# load HuggingFace model
a = ViTModel(__UpperCamelCase , add_pooling_layer=__UpperCamelCase)
a = TrOCRForCausalLM(__UpperCamelCase)
a = VisionEncoderDecoderModel(encoder=__UpperCamelCase , decoder=__UpperCamelCase)
model.eval()
# load state_dict of original model, rename some keys
a = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location="cpu" , check_hash=__UpperCamelCase)["model"]
a = create_rename_keys(__UpperCamelCase , __UpperCamelCase)
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase)
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
a = state_dict.pop(__UpperCamelCase)
if key.startswith("decoder") and "output_projection" not in key:
a = val
else:
a = val
# load state dict
model.load_state_dict(__UpperCamelCase)
# Check outputs on an image
a = ViTImageProcessor(size=encoder_config.image_size)
a = RobertaTokenizer.from_pretrained("roberta-large")
a = TrOCRProcessor(__UpperCamelCase , __UpperCamelCase)
a = processor(images=prepare_img(__UpperCamelCase) , return_tensors="pt").pixel_values
# verify logits
a = torch.tensor([[model.config.decoder.decoder_start_token_id]])
a = model(pixel_values=__UpperCamelCase , decoder_input_ids=__UpperCamelCase)
a = outputs.logits
a = torch.Size([1, 1, 5_02_65])
if "trocr-base-handwritten" in checkpoint_url:
a = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311])
elif "trocr-large-handwritten" in checkpoint_url:
a = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170])
elif "trocr-base-printed" in checkpoint_url:
a = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210])
elif "trocr-large-printed" in checkpoint_url:
a = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535])
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , __UpperCamelCase , atol=1e-3), "First elements of logits not as expected"
Path(__UpperCamelCase).mkdir(exist_ok=__UpperCamelCase)
print(f'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(__UpperCamelCase)
print(f'''Saving processor to {pytorch_dump_folder_path}''')
processor.save_pretrained(__UpperCamelCase)
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
lowercase__ : str = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
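    # A hypothetical invocation of this conversion script (the file name and output
    # path are illustrative; the URL is the parser default above):
    #   python convert_trocr_checkpoint.py \
    #       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
    #       --pytorch_dump_folder_path ./trocr-base-handwritten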
| 180 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
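    # `solution(ratio)` grows the square spiral ring by ring; a spiral with side
    # length j has 2 * j - 1 diagonal entries in total, which is exactly the
    # denominator in the while-condition above.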
| 180 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class __lowerCAmelCase ( datasets.BuilderConfig):
_a = None
class __lowerCAmelCase ( datasets.ArrowBasedBuilder):
_a = PandasConfig
def SCREAMING_SNAKE_CASE ( self: Any ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: List[str] ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
lowercase :int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCAmelCase , (str, list, tuple) ):
lowercase :Union[str, Any] = data_files
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :str = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase :Optional[int] = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
lowercase :List[Any] = []
for split_name, files in data_files.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase :Optional[Any] = [dl_manager.iter_files(_lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCAmelCase , gen_kwargs={"files": files} ) )
return splits
def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: pa.Table ):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase :Any = table_cast(_lowerCAmelCase , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: Union[str, Any] ):
for i, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ):
with open(_lowerCAmelCase , "rb" ) as f:
lowercase :int = pa.Table.from_pandas(pd.read_pickle(_lowerCAmelCase ) )
yield i, self._cast_table(_lowerCAmelCase )
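# Note on the generator above: each input file is expected to be a pickled pandas
# DataFrame (`pd.read_pickle`), which is converted to one Arrow table per file and,
# when a feature schema is specified, cast to it before being yielded as a shard.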
| 236 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : List[str] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Any = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
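# Outside of TYPE_CHECKING, the module object is swapped for a `_LazyModule`, so the
# heavy torch / TensorFlow / Flax imports declared above only run when one of the
# exported names is first accessed.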
| 236 | 1 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
lowercase_ = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
    )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
if not args.no_tpu:
lowercase__ = initialize_tpu(_UpperCAmelCase )
lowercase__ = tf.distribute.TPUStrategy(_UpperCAmelCase )
else:
lowercase__ = tf.distribute.OneDeviceStrategy(device='/gpu:0' )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' )
lowercase__ = AutoTokenizer.from_pretrained(args.tokenizer )
lowercase__ = AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase__ = tokenizer.vocab_size
lowercase__ = tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) )
if not training_records:
raise ValueError(F"""No .tfrecord files found in {args.train_dataset}.""" )
lowercase__ = tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) )
if not eval_records:
raise ValueError(F"""No .tfrecord files found in {args.eval_dataset}.""" )
lowercase__ = count_samples(_UpperCAmelCase )
lowercase__ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase__ = steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase__ = TFAutoModelForMaskedLM.from_config(_UpperCAmelCase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase__ = create_optimizer(
num_train_steps=_UpperCAmelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=_UpperCAmelCase , metrics=['accuracy'] )
def decode_fn(_SCREAMING_SNAKE_CASE ):
lowercase__ = {
'input_ids': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
'attention_mask': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(_UpperCAmelCase , _UpperCAmelCase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase__ = DataCollatorForLanguageModeling(
tokenizer=_UpperCAmelCase , mlm_probability=args.mlm_probability , mlm=_UpperCAmelCase , return_tensors='tf' )
def mask_with_collator(_SCREAMING_SNAKE_CASE ):
# TF really needs an isin() function
lowercase__ = (
~tf.cast(batch['attention_mask'] , tf.bool )
| (batch['input_ids'] == tokenizer.cls_token_id)
| (batch['input_ids'] == tokenizer.sep_token_id)
)
lowercase__ = data_collator.tf_mask_tokens(
batch['input_ids'] , vocab_size=len(_UpperCAmelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_UpperCAmelCase , )
return batch
lowercase__ = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase__ = prepare_dataset(
_UpperCAmelCase , decode_fn=_UpperCAmelCase , mask_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , shuffle=_UpperCAmelCase , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase__ = prepare_dataset(
_UpperCAmelCase , decode_fn=_UpperCAmelCase , mask_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , shuffle=_UpperCAmelCase , )
lowercase__ = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_UpperCAmelCase ) )
model.fit(
_UpperCAmelCase , validation_data=_UpperCAmelCase , epochs=args.num_epochs , callbacks=_UpperCAmelCase , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
main(args)
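    # A hypothetical launch (the script name and paths are illustrative; the flag
    # names match the parser above):
    #   python run_mlm_tpu.py --tokenizer unigram-tokenizer-wikitext \
    #       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
    #       --output_dir ./mlm-checkpoints --bfloat16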
| 351 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
lowercase_ = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
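    # A hypothetical invocation (the script name and paths are illustrative; the
    # positional and optional arguments match the parser above):
    #   python convert_mbart_checkpoint.py model.pt ./mbart-converted \
    #       --hf_config facebook/mbart-large-cc25 --finetuned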
| 269 | 0 |
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """simple docstring"""
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
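    # Bisection halves the bracket each iteration, so the 1e-7 tolerance used above
    # needs roughly log2((1000 - 1) / 1e-7) ~ 34 iterations for this call.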
import doctest
doctest.testmod() | 320 |
import argparse
import os
import re
__a = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__a = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
__a = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def a ( snake_case__: str , snake_case__: bool = False ):
'''simple docstring'''
with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ = f.read()
lowercase_ = content.split('''\n''' )
lowercase_ = []
lowercase_ = 0
while line_idx < len(snake_case__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowercase_ = len(re.search(r'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowercase_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowercase_ = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
lowercase_ = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(snake_case__ ) )
elif "\n".join(snake_case__ ) != content:
return True
def a ( snake_case__: bool = False ):
'''simple docstring'''
lowercase_ = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('''.py''' )]
lowercase_ = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames]
if not overwrite and any(snake_case__ ):
lowercase_ = [f for f, d in zip(snake_case__ , snake_case__ ) if d]
raise ValueError(
F'''The following files have auto mappings that need sorting: {', '.join(snake_case__ )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
__a = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 30 | 0 |
from __future__ import annotations
from typing import Any
class Matrix:
    """A dense matrix supporting addition, multiplication, transpose and the Sherman-Morrison update."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Initialise a row x column matrix filled with default_value."""
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Check whether loc is a valid (row, column) index into this matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """Given self == A^(-1), return (A + uv^T)^(-1); u and v must be column vectors."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
    test2()
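# A minimal, hand-checkable sketch of the Sherman-Morrison update above (illustrative, not
# part of the original script). With A = I, u = e1 and v = e2, the closed form
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# gives [[1, -1], [0, 1]] as the inverse of [[1, 1], [0, 1]].
def _sherman_morrison_demo() -> None:
    identity_inverse = Matrix(2, 2, 0)
    for i in range(2):
        identity_inverse[i, i] = 1  # self must already hold A^(-1); here A = I
    u = Matrix(2, 1, 0)
    v = Matrix(2, 1, 0)
    u[0, 0], u[1, 0] = 1, 0
    v[0, 0], v[1, 0] = 0, 1
    updated = identity_inverse.sherman_morrison(u, v)
    assert updated is not None
    assert updated[0, 0] == 1 and updated[0, 1] == -1
    assert updated[1, 0] == 0 and updated[1, 1] == 1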
import datasets
from .evaluate import evaluate
_CITATION = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_DESCRIPTION = "\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_KWARGS_DESCRIPTION = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
"""simple docstring"""
    def _info(self) -> datasets.MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ),codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'],)
    def _compute(self, predictions, references):
        '''simple docstring'''
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
"""simple docstring"""
def different_signs(numa: int, numb: int) -> bool:
    """Return True when the two integers have opposite signs (their XOR is negative)."""
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
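# Worked examples (illustrative, not in the original file): the XOR of two integers is
# negative exactly when their sign bits differ, so different_signs(1, -1) is True while
# different_signs(1, 1) and different_signs(-1, -1) are both False.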
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of `number` ends with the digits of `number` itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
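# Worked example (illustrative, not in the original file): 76 is automorphic because
# 76 ** 2 == 5776 ends in "76", whereas 7 is not, since 49 does not end in 7.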
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    """Wraps a ViT image processor and a CLIP tokenizer into a single CLIPSeg processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
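# Illustrative usage sketch (not part of the original file; the checkpoint id and inputs
# are assumptions):
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
# `inputs` then carries "input_ids", "attention_mask" and "pixel_values" for the model.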
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
_A = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 1_28, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 1_42, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_A = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 1_28,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 1_42,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(lowerCAmelCase_ ) , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowerCAmelCase_ ) , x.transpose() ) )
_A = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowerCAmelCase_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCAmelCase ( self ) -> List[Any]:
_A = np.random.randn(3 , 4 )
_A = torch.tensor(lowerCAmelCase_ )
self.assertTrue(np.allclose(transpose(lowerCAmelCase_ ) , transpose(lowerCAmelCase_ ).numpy() ) )
_A = np.random.randn(3 , 4 , 5 )
_A = torch.tensor(lowerCAmelCase_ )
self.assertTrue(np.allclose(transpose(lowerCAmelCase_ , axes=(1, 2, 0) ) , transpose(lowerCAmelCase_ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = np.random.randn(3 , 4 )
_A = tf.constant(lowerCAmelCase_ )
self.assertTrue(np.allclose(transpose(lowerCAmelCase_ ) , transpose(lowerCAmelCase_ ).numpy() ) )
_A = np.random.randn(3 , 4 , 5 )
_A = tf.constant(lowerCAmelCase_ )
self.assertTrue(np.allclose(transpose(lowerCAmelCase_ , axes=(1, 2, 0) ) , transpose(lowerCAmelCase_ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = np.random.randn(3 , 4 )
_A = jnp.array(lowerCAmelCase_ )
self.assertTrue(np.allclose(transpose(lowerCAmelCase_ ) , np.asarray(transpose(lowerCAmelCase_ ) ) ) )
_A = np.random.randn(3 , 4 , 5 )
_A = jnp.array(lowerCAmelCase_ )
self.assertTrue(np.allclose(transpose(lowerCAmelCase_ , axes=(1, 2, 0) ) , np.asarray(transpose(lowerCAmelCase_ , axes=(1, 2, 0) ) ) ) )
def UpperCAmelCase ( self ) -> List[str]:
_A = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowerCAmelCase_ , (4, 3) ) , np.reshape(lowerCAmelCase_ , (4, 3) ) ) )
_A = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowerCAmelCase_ , (12, 5) ) , np.reshape(lowerCAmelCase_ , (12, 5) ) ) )
@require_torch
def UpperCAmelCase ( self ) -> int:
_A = np.random.randn(3 , 4 )
_A = torch.tensor(lowerCAmelCase_ )
self.assertTrue(np.allclose(reshape(lowerCAmelCase_ , (4, 3) ) , reshape(lowerCAmelCase_ , (4, 3) ).numpy() ) )
_A = np.random.randn(3 , 4 , 5 )
_A = torch.tensor(lowerCAmelCase_ )
self.assertTrue(np.allclose(reshape(lowerCAmelCase_ , (12, 5) ) , reshape(lowerCAmelCase_ , (12, 5) ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = np.random.randn(3 , 4 )
_A = tf.constant(lowerCAmelCase_ )
self.assertTrue(np.allclose(reshape(lowerCAmelCase_ , (4, 3) ) , reshape(lowerCAmelCase_ , (4, 3) ).numpy() ) )
_A = np.random.randn(3 , 4 , 5 )
_A = tf.constant(lowerCAmelCase_ )
self.assertTrue(np.allclose(reshape(lowerCAmelCase_ , (12, 5) ) , reshape(lowerCAmelCase_ , (12, 5) ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ) -> List[str]:
_A = np.random.randn(3 , 4 )
_A = jnp.array(lowerCAmelCase_ )
self.assertTrue(np.allclose(reshape(lowerCAmelCase_ , (4, 3) ) , np.asarray(reshape(lowerCAmelCase_ , (4, 3) ) ) ) )
_A = np.random.randn(3 , 4 , 5 )
_A = jnp.array(lowerCAmelCase_ )
self.assertTrue(np.allclose(reshape(lowerCAmelCase_ , (12, 5) ) , np.asarray(reshape(lowerCAmelCase_ , (12, 5) ) ) ) )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowerCAmelCase_ ) , np.squeeze(lowerCAmelCase_ ) ) )
_A = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowerCAmelCase_ , axis=2 ) , np.squeeze(lowerCAmelCase_ , axis=2 ) ) )
@require_torch
def UpperCAmelCase ( self ) -> Optional[int]:
_A = np.random.randn(1 , 3 , 4 )
_A = torch.tensor(lowerCAmelCase_ )
self.assertTrue(np.allclose(squeeze(lowerCAmelCase_ ) , squeeze(lowerCAmelCase_ ).numpy() ) )
_A = np.random.randn(1 , 4 , 1 , 5 )
_A = torch.tensor(lowerCAmelCase_ )
self.assertTrue(np.allclose(squeeze(lowerCAmelCase_ , axis=2 ) , squeeze(lowerCAmelCase_ , axis=2 ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ) -> str:
_A = np.random.randn(1 , 3 , 4 )
_A = tf.constant(lowerCAmelCase_ )
self.assertTrue(np.allclose(squeeze(lowerCAmelCase_ ) , squeeze(lowerCAmelCase_ ).numpy() ) )
_A = np.random.randn(1 , 4 , 1 , 5 )
_A = tf.constant(lowerCAmelCase_ )
self.assertTrue(np.allclose(squeeze(lowerCAmelCase_ , axis=2 ) , squeeze(lowerCAmelCase_ , axis=2 ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ) -> Optional[int]:
_A = np.random.randn(1 , 3 , 4 )
_A = jnp.array(lowerCAmelCase_ )
self.assertTrue(np.allclose(squeeze(lowerCAmelCase_ ) , np.asarray(squeeze(lowerCAmelCase_ ) ) ) )
_A = np.random.randn(1 , 4 , 1 , 5 )
_A = jnp.array(lowerCAmelCase_ )
self.assertTrue(np.allclose(squeeze(lowerCAmelCase_ , axis=2 ) , np.asarray(squeeze(lowerCAmelCase_ , axis=2 ) ) ) )
def UpperCAmelCase ( self ) -> Any:
_A = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowerCAmelCase_ , axis=1 ) , np.expand_dims(lowerCAmelCase_ , axis=1 ) ) )
@require_torch
def UpperCAmelCase ( self ) -> int:
_A = np.random.randn(3 , 4 )
_A = torch.tensor(lowerCAmelCase_ )
self.assertTrue(np.allclose(expand_dims(lowerCAmelCase_ , axis=1 ) , expand_dims(lowerCAmelCase_ , axis=1 ).numpy() ) )
@require_tf
def UpperCAmelCase ( self ) -> int:
_A = np.random.randn(3 , 4 )
_A = tf.constant(lowerCAmelCase_ )
self.assertTrue(np.allclose(expand_dims(lowerCAmelCase_ , axis=1 ) , expand_dims(lowerCAmelCase_ , axis=1 ).numpy() ) )
@require_flax
def UpperCAmelCase ( self ) -> Optional[int]:
_A = np.random.randn(3 , 4 )
_A = jnp.array(lowerCAmelCase_ )
self.assertTrue(np.allclose(expand_dims(lowerCAmelCase_ , axis=1 ) , np.asarray(expand_dims(lowerCAmelCase_ , axis=1 ) ) ) )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BioGPT model."""

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
A : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
A : Optional[Any] = {'target_lang': 'fi', 'source_lang': 'en'}
A : List[Any] = '>>zh<<'
A : int = 'Helsinki-NLP/'
if is_torch_available():
A : Tuple = 'pt'
elif is_tf_available():
A : Any = 'tf'
else:
A : Any = 'jax'
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
__a = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
__a = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
__a = Path(self.tmpdirname )
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab'''] )
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''source_spm'''] )
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''target_spm'''] )
__a = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self , **_snake_case ) -> MarianTokenizer:
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = '''</s>'''
__a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) , _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) , _snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''</s>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(_snake_case ) , 9 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" )
__a = en_de_tokenizer(['''I am a small frog'''] , return_tensors=_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
__a = [38, 121, 14, 697, 38_848, 0]
self.assertListEqual(_snake_case , batch.input_ids[0] )
__a = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_snake_case )
__a = [x.name for x in Path(_snake_case ).glob('''*''' )]
self.assertIn('''source.spm''' , _snake_case )
MarianTokenizer.from_pretrained(_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = self.get_tokenizer()
__a = tok(
['''I am a small frog''' * 1_000, '''I am a small frog'''] , padding=_snake_case , truncation=_snake_case , return_tensors=_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
__a = self.get_tokenizer()
__a = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=_snake_case , return_tensors=_snake_case )
self.assertIsInstance(_snake_case , _snake_case )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = {'''input_ids''': [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , )
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
__a = '''Tämä on testi'''
__a = '''This is a test'''
__a = [76, 7, 2_047, 2]
__a = [69, 12, 11, 940, 2]
__a = tokenizer(_snake_case ).input_ids
self.assertListEqual(_snake_case , _snake_case )
__a = tokenizer(text_target=_snake_case ).input_ids
self.assertListEqual(_snake_case , _snake_case )
__a = tokenizer.decode(_snake_case , skip_special_tokens=_snake_case )
        self.assertEqual(_snake_case , _snake_case )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : int = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = ['MobileViTFeatureExtractor']
A : str = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
A : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
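# The _LazyModule object above defers the heavy framework imports until an attribute is
# first accessed, so (illustrative) `from transformers.models.mobilevit import MobileViTModel`
# only triggers the torch-backed import at that point rather than at package import time.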
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : str = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Any = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of words drawn from `words`."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be a non-empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
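# Worked examples (illustrative, not in the original file):
#   word_break("applepenapple", ["apple", "pen"])                  -> True  ("apple" + "pen" + "apple")
#   word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) -> False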
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of n."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the decimal digits of n."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the decimal digits of n via its string representation."""
    return sum(int(c) for c in str(abs(n)))
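# Quick sanity check (illustrative): all three implementations agree, e.g.
# sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15,
# and negative inputs give the same result because the sign is dropped via abs().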
def benchmark() -> None:
    """Time each implementation on a few sample values."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RoFormer model."""

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines; `frames` holds the denoised video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Tuple = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**A_ )
return config
def _UpperCAmelCase ( self , A_=0 , **A_ ):
'''simple docstring'''
_UpperCAmelCase : int = dict(self.forward_default_kwargs )
_UpperCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : Dict = 0.1 * sample
_UpperCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Dict = self.get_scheduler_config(**A_ )
_UpperCAmelCase : Any = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals
_UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
_UpperCAmelCase : str = scheduler_class.from_pretrained(A_ )
new_scheduler.set_timesteps(A_ )
# copy over dummy past residuals
_UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Any = new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCAmelCase : Union[str, Any] = scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self , A_=0 , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Any = dict(self.forward_default_kwargs )
_UpperCAmelCase : int = kwargs.pop("num_inference_steps" , A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : Any = 0.1 * sample
_UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
_UpperCAmelCase : Dict = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase : Any = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
_UpperCAmelCase : str = scheduler_class.from_pretrained(A_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(A_ )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase : List[Any] = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Union[str, Any] = new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCAmelCase : Optional[Any] = scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : List[str] = new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
_UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(**A_ )
_UpperCAmelCase : List[str] = scheduler_class(**A_ )
_UpperCAmelCase : List[str] = 10
_UpperCAmelCase : Optional[int] = self.dummy_model()
_UpperCAmelCase : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(A_ )
for i, t in enumerate(scheduler.prk_timesteps ):
_UpperCAmelCase : Dict = model(A_ , A_ )
_UpperCAmelCase : int = scheduler.step_prk(A_ , A_ , A_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_UpperCAmelCase : Any = model(A_ , A_ )
_UpperCAmelCase : Any = scheduler.step_plms(A_ , A_ , A_ ).prev_sample
return sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_UpperCAmelCase : str = kwargs.pop("num_inference_steps" , A_ )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : str = 0.1 * sample
if num_inference_steps is not None and hasattr(A_ , "set_timesteps" ):
scheduler.set_timesteps(A_ )
elif num_inference_steps is not None and not hasattr(A_ , "set_timesteps" ):
_UpperCAmelCase : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_UpperCAmelCase : Any = dummy_past_residuals[:]
_UpperCAmelCase : Any = scheduler.step_prk(A_ , 0 , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = scheduler.step_prk(A_ , 1 , A_ , **A_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_UpperCAmelCase : Optional[int] = scheduler.step_plms(A_ , 0 , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = scheduler.step_plms(A_ , 1 , A_ , **A_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=A_ )
_UpperCAmelCase : List[str] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config(steps_offset=1 )
_UpperCAmelCase : str = scheduler_class(**A_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = 27
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : str = self.dummy_sample
_UpperCAmelCase : str = 0.1 * sample
_UpperCAmelCase : str = self.get_scheduler_config()
_UpperCAmelCase : Tuple = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_UpperCAmelCase : Dict = scheduler.step_prk(A_ , A_ , A_ ).prev_sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(A_ ):
_UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_UpperCAmelCase : Dict = self.get_scheduler_config()
_UpperCAmelCase : Union[str, Any] = scheduler_class(**A_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.full_loop()
_UpperCAmelCase : int = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1e-2
assert abs(result_mean.item() - 0.25_80 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1e-2
assert abs(result_mean.item() - 0.08_78 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
_UpperCAmelCase : Dict = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Optional[int] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1e-2
assert abs(result_mean.item() - 0.29_95 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
_UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Dict = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1e-2
assert abs(result_mean.item() - 0.24_34 ) < 1e-3
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
A_ :List[str] = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
A_ :Optional[Any] = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed weight names to transformers names."""
    layer_rename_map = {
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
layer_number -= 3
return F'h.{layer_number}.' + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def A ( a_ ,a_ ,a_ ,a_ ,a_ ) -> Dict:
# Construct model
if bloom_config_file == "":
__UpperCamelCase : List[Any] =BloomConfig()
else:
__UpperCamelCase : List[str] =BloomConfig.from_json_file(a_ )
if shard_model:
__UpperCamelCase : int =os.listdir(a_ )
__UpperCamelCase : Union[str, Any] =sorted(filter(lambda a_ : s.startswith('layer' ) and "model_00" in s ,a_ ) )
__UpperCamelCase : Optional[Any] ={'weight_map': {}, 'metadata': {}}
__UpperCamelCase : Dict =0
__UpperCamelCase : int =None
__UpperCamelCase : Any =BloomConfig()
for j, file in enumerate(a_ ):
print('Processing file: {}'.format(a_ ) )
__UpperCamelCase : Optional[int] =None
for i in range(a_ ):
# load all TP files
__UpperCamelCase : Dict =file.replace('model_00' ,F'model_0{i}' )
__UpperCamelCase : Optional[Any] =torch.load(os.path.join(a_ ,a_ ) ,map_location='cpu' )
# Rename keys in the transformers names
__UpperCamelCase : int =list(temp.keys() )
for key in keys:
__UpperCamelCase : Dict =temp.pop(a_ )
if tensors is None:
__UpperCamelCase : Any =temp
else:
for key in tensors.keys():
if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__UpperCamelCase : List[Any] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
__UpperCamelCase : Any =torch.cat([tensors[key], temp[key]] ,dim=a_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__UpperCamelCase : Optional[Any] =tensors[key] / pretraining_tp
torch.save(
a_ ,os.path.join(
a_ ,'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) ,str(len(a_ ) ).zfill(5 ) ) ,) ,)
for key in tensors.keys():
__UpperCamelCase : Union[str, Any] =tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__UpperCamelCase : int ='pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) ,str(len(a_ ) ).zfill(5 ) )
__UpperCamelCase : Union[str, Any] =BloomConfig()
__UpperCamelCase : Tuple =pytorch_dump_folder_path + '/' + CONFIG_NAME
__UpperCamelCase : Optional[int] =total_size
with open(a_ ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(a_ ,WEIGHTS_NAME + '.index.json' ) ,'w' ,encoding='utf-8' ) as f:
__UpperCamelCase : List[Any] =json.dumps(a_ ,indent=2 ,sort_keys=a_ ) + '\n'
f.write(a_ )
else:
__UpperCamelCase : List[Any] =BloomModel(a_ )
__UpperCamelCase : Optional[Any] =os.listdir(a_ )
__UpperCamelCase : Dict =sorted(filter(lambda a_ : s.startswith('layer' ) and "model_00" in s ,a_ ) )
__UpperCamelCase : Any =None
for i, file in enumerate(a_ ):
__UpperCamelCase : Union[str, Any] =None
for i in range(a_ ):
# load all TP files
__UpperCamelCase : Optional[Any] =file.replace('model_00' ,F'model_0{i}' )
__UpperCamelCase : str =torch.load(os.path.join(a_ ,a_ ) ,map_location='cpu' )
# Rename keys in the transformers names
__UpperCamelCase : List[str] =list(temp.keys() )
for key in keys:
__UpperCamelCase : Union[str, Any] =temp.pop(a_ )
if tensors is None:
__UpperCamelCase : Optional[Any] =temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__UpperCamelCase : Optional[int] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__UpperCamelCase : int =torch.cat([tensors[key], temp[key]] ,dim=a_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
            if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__UpperCamelCase : Dict =tensors[key] / pretraining_tp
__UpperCamelCase : str =model.load_state_dict(a_ ,strict=a_ )
assert not other_keys.unexpected_keys, F'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
__UpperCamelCase : str =set(other_keys.missing_keys )
else:
__UpperCamelCase : int =missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(a_ ,exist_ok=a_ )
__UpperCamelCase : Optional[int] =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__UpperCamelCase : Dict =pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
__UpperCamelCase : List[str] =model.to(config.torch_dtype )
torch.save(model.state_dict() ,a_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(a_ ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
A_ :str = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
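# Illustrative invocation of the conversion script above. The script filename
# and all paths are placeholder assumptions, not taken from the source:
#
#   python convert_bloom_checkpoint.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --shard_model \
#       --pretraining_tp 4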
| 71 | 0 |
"""simple docstring"""
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ) -> dict:
    simulator = qiskit.Aer.get_backend('''aer_simulator''' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(F"Total count for various states are: {counts}")
| 357 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCAmelCase__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf( self ):
        unmasker = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
        outputs = unmasker('''My name is <mask>''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 38015, '''token_str''': ''' grouped'''},
                {'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 25506, '''token_str''': ''' accuser'''},
            ] , )

        outputs = unmasker('''The largest city in France is <mask>''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {
                    '''sequence''': '''The largest city in France is grouped''',
                    '''score''': 2.1e-05,
                    '''token''': 38015,
                    '''token_str''': ''' grouped''',
                },
                {
                    '''sequence''': '''The largest city in France is accuser''',
                    '''score''': 2.1e-05,
                    '''token''': 25506,
                    '''token_str''': ''' accuser''',
                },
            ] , )

        outputs = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13606, '''token_str''': ''' Clara'''},
                {'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
                {'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 2941, '''token_str''': ''' Te'''},
            ] , )
@require_torch
    def test_small_model_pt( self ):
        unmasker = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
        outputs = unmasker('''My name is <mask>''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 35676, '''token_str''': ''' Maul'''},
                {'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS'''},
            ] , )

        outputs = unmasker('''The largest city in France is <mask>''' )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {
                    '''sequence''': '''The largest city in France is Maul''',
                    '''score''': 2.2e-05,
                    '''token''': 35676,
                    '''token_str''': ''' Maul''',
                },
                {'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS'''},
            ] , )

        outputs = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                {'''sequence''': '''My name is Patrick''', '''score''': 2.1e-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
                {'''sequence''': '''My name is Te''', '''score''': 2e-05, '''token''': 2941, '''token_str''': ''' Te'''},
                {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 13606, '''token_str''': ''' Clara'''},
            ] , )

        outputs = unmasker('''My name is <mask> <mask>''' , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=6 ) , [
                [
                    {
                        '''score''': 2.2e-05,
                        '''token''': 35676,
                        '''token_str''': ''' Maul''',
                        '''sequence''': '''<s>My name is Maul<mask></s>''',
                    },
                    {'''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
                ],
                [
                    {
                        '''score''': 2.2e-05,
                        '''token''': 35676,
                        '''token_str''': ''' Maul''',
                        '''sequence''': '''<s>My name is<mask> Maul</s>''',
                    },
                    {'''score''': 2.2e-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
                ],
            ] , )
@require_torch_gpu
    def test_fp16_casting( self ):
        pipe = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )

        # convert model to fp16
        pipe.model.half()

        response = pipe('''Paris is the [MASK] of France.''' )
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got cast back to float32
        # for postprocessing.
        self.assertIsInstance(response , list )
@slow
@require_torch
    def test_large_model_pt( self ):
        unmasker = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
        self.run_large_test(unmasker )
@slow
@require_tf
    def test_large_model_tf( self ):
        unmasker = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
        self.run_large_test(unmasker )
    def run_large_test( self , unmasker ):
        outputs = unmasker('''My name is <mask>''' )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 610, '''token_str''': ''' John'''},
                {'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 1573, '''token_str''': ''' Chris'''},
            ] , )
        outputs = unmasker('''The largest city in France is <mask>''' )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {
                    '''sequence''': '''The largest city in France is Paris''',
                    '''score''': 0.251,
                    '''token''': 2201,
                    '''token_str''': ''' Paris''',
                },
                {
                    '''sequence''': '''The largest city in France is Lyon''',
                    '''score''': 0.214,
                    '''token''': 12790,
                    '''token_str''': ''' Lyon''',
                },
            ] , )

        outputs = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
        self.assertEqual(
            nested_simplify(outputs ) , [
                {'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 3499, '''token_str''': ''' Patrick'''},
                {'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 13606, '''token_str''': ''' Clara'''},
                {'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 2941, '''token_str''': ''' Te'''},
            ] , )
@require_torch
    def test_model_no_pad_pt( self ):
        unmasker = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
@require_tf
    def test_model_no_pad_tf( self ):
        unmasker = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
    def get_test_pipeline( self , model , tokenizer , processor ):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )

        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f'''This is another {tokenizer.mask_token} test''',
        ]
        return fill_masker, examples
    def run_pipeline_test( self , fill_masker , examples ):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f'''This is a {tokenizer.mask_token}''' , )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )

        outputs = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )

        outputs = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
        self.assertEqual(
            outputs , [
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
            ] , )

        with self.assertRaises(ValueError ):
            fill_masker([None] )
        # No mask_token is not supported
        with self.assertRaises(PipelineException ):
            fill_masker('''This is''' )

        self.run_test_top_k(model , tokenizer )
        self.run_test_targets(model , tokenizer )
        self.run_test_top_k_targets(model , tokenizer )
        self.fill_mask_with_duplicate_targets_and_top_k(model , tokenizer )
        self.fill_mask_with_multiple_masks(model , tokenizer )
    def run_test_targets( self , model , tokenizer ):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys() )[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , targets=targets )
        outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['''token'''] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el['''token_str'''] for el in outputs} , set(processed_targets ) )

        # Call argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=targets )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['''token'''] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el['''token_str'''] for el in outputs} , set(processed_targets ) )

        # Score equivalence
        outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=targets )
        tokens = [top_mask['''token_str'''] for top_mask in outputs]
        scores = [top_mask['''score'''] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets ) == set(tokens ):
            unmasked_targets = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=tokens )
            target_scores = [top_mask['''score'''] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores ) , nested_simplify(target_scores ) )

        # Raises with invalid
        with self.assertRaises(ValueError ):
            outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError ):
                outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[''''''] )
            with self.assertRaises(ValueError ):
                outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets='''''' )
    def run_test_top_k( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2 )
        outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )

        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs2 = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
        self.assertEqual(
            outputs2 , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
    def run_test_top_k_targets( self , model , tokenizer ):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys() )[:3]
        outputs = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=targets )

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el['''token_str'''] for el in sorted(outputs , key=lambda x : x["score"] , reverse=True )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2 ).issubset(vocab ):
            outputs2 = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=targets2 )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
    def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys() )[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=targets , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # than the number of unique targets
        self.assertEqual(len(outputs ) , 3 )
    def fill_mask_with_multiple_masks( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(
            f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
            ] , )
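# To run only this test module, an illustrative command (assuming the standard
# transformers repository layout; the path is an assumption, not from the source):
#
#   python -m pytest -v tests/pipelines/test_pipelines_fill_mask.py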
| 40 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _UpperCAmelCase ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )

        images = kwargs.pop('''images''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )

        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings['''input_ids''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @contextmanager
    def as_target_processor( self ):
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson( self , tokens , is_inner_value=False , added_vocab=None ):
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(R'''<s_(.*?)>''' , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )

            end_token = re.search(RF'''</s_{key}>''' , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , '''''' )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R'''<sep/>''' ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )

        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
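    # Illustrative round-trip for the parser above (hypothetical tag sequence,
    # assuming none of the tags appear in `added_vocab`):
    #
    #   tokenajson("<s_menu><s_nm>Latte</s_nm></s_menu>")
    #   -> {"menu": {"nm": "Latte"}}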
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 33 |
"""simple docstring"""
__A : Any = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 33 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int ):
    """simple docstring"""
    if __a < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(__a , float ):
        raise TypeError('Input value must be a \'int\' type' )
    return bin(__a ).count('1' )
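# Examples: 11 == 0b1011 has three set bits; 25 == 0b11001 also has three.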
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353 |
'''simple docstring'''
import sys
def matrix_chain_order(array ):
    """simple docstring"""
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]

    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution , i , j ):
    """simple docstring"""
    if i == j:
        print('A' + str(i ) , end=' ' )
    else:
        print('(' , end=' ' )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(')' , end=' ' )


def main():
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array )

    print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )


if __name__ == "__main__":
    main()
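# For this classic CLRS example, the expected minimum cost is 15125 scalar
# multiplications, with parenthesization ((A1(A2A3))((A4A5)A6)).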
| 5 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
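# Quick illustration of the two helpers above:
#
#   >>> get_pairs(("h", "e", "l", "l", "o"))
#   {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
#
# bytes_to_unicode() maps every byte 0-255 to a printable unicode character so
# that reversible BPE can operate on arbitrary text without unknown bytes.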
class UpperCAmelCase__ ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , A_ , A_ , A_="replace" , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=False , **A_ , ) -> Union[str, Any]:
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCamelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
__UpperCamelCase =json.load(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =errors # how to handle errors in decoding
__UpperCamelCase =bytes_to_unicode()
__UpperCamelCase ={v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
__UpperCamelCase =merges_handle.read().split('\n' )[1:-1]
__UpperCamelCase =[tuple(merge.split() ) for merge in bpe_merges]
__UpperCamelCase =dict(zip(A_ , range(len(A_ ) ) ) )
__UpperCamelCase ={}
__UpperCamelCase =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCamelCase =re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size( self ) -> int:
        return len(self.encoder )

    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )

        if not pairs:
            return token

        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j

                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )

    def convert_tokens_to_string( self , tokens ):
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )

        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )

        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids( self , conversation : "Conversation" ) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )

        full_string = ' '.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
        return input_ids
| 62 | """simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps : jnp.ndarray , embedding_dim : int , freq_shift : float = 1 , min_timescale : float = 1 , max_timescale : float = 1.0E4 , flip_sin_to_cos : bool = False , scale : float = 1.0 , ):
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
class UpperCAmelCase_ ( nn.Module):
    time_embed_dim : int = 32
    dtype : jnp.dtype = jnp.float32

    @nn.compact
    def __call__( self , temb ):
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_1' )(temb )
        temb = nn.silu(temb )
        temb = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='linear_2' )(temb )
        return temb
class UpperCAmelCase_ ( nn.Module):
    dim : int = 32
    flip_sin_to_cos : bool = False
    freq_shift : float = 1
@nn.compact
def __call__( self , a ) -> str:
return get_sinusoidal_embeddings(
a , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
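# Minimal usage sketch for the helper above (shapes are illustrative, assuming
# a JAX/Flax environment):
#
#   timesteps = jnp.arange(4)                                    # (batch,)
#   emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
#   assert emb.shape == (4, 32)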
| 77 | 0 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb ) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray ) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image , kernel ) -> np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
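# Morphological dilation: an output pixel is set to 1 whenever the kernel,
# placed over that pixel, overlaps at least one foreground (1) pixel.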
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
    lena = np.array(Image.open(lena_path))

    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)

    # Save the output image
    pil_img = Image.fromarray(output).convert('''RGB''')
    pil_img.save('''result_dilation.png''')
| 354 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x , bits=BITS ):
    """Expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0 , 255 )

    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b c h w -> b c 1 h w""" )

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , """b c d h w -> b (c d) h w""" )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x , bits=BITS ):
    """Expects bits from -1 to 1, outputs image tensor from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )

    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b (c d) h w -> b c d h w""" , d=8 )
    dec = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
    return (dec / 255).clamp(0.0 , 1.0 )
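# Round-trip sanity check for the two converters above (illustrative):
#
#   x = torch.rand(1, 3, 8, 8)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), x, atol=1 / 255)
#
# The error bound comes from the 8-bit quantization in decimal_to_bits.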
def ddim_bit_scheduler_step(self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = True , generator=None , return_dict = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to understand the notation.
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else "cpu"
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def SCREAMING_SNAKE_CASE_ (self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase="epsilon" , UpperCamelCase=None , UpperCamelCase = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
lowerCamelCase__ : List[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = torch.split(UpperCamelCase , sample.shape[1] , dim=1 )
else:
lowerCamelCase__ : List[str] = None
# 1. compute alphas, betas
lowerCamelCase__ : str = self.alphas_cumprod[t]
lowerCamelCase__ : List[str] = self.alphas_cumprod[t - 1] if t > 0 else self.one
lowerCamelCase__ : str = 1 - alpha_prod_t
lowerCamelCase__ : List[Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
lowerCamelCase__ : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
lowerCamelCase__ : Optional[Any] = model_output
else:
raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
lowerCamelCase__ : str = self.bit_scale
if self.config.clip_sample:
lowerCamelCase__ : List[Any] = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase__ : Tuple = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
lowerCamelCase__ : Tuple = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase__ : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCamelCase__ : Optional[Any] = 0
if t > 0:
lowerCamelCase__ : Optional[Any] = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCamelCase ).to(model_output.device )
lowerCamelCase__ : str = (self._get_variance(UpperCamelCase , predicted_variance=UpperCamelCase ) ** 0.5) * noise
lowerCamelCase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
class _lowercase ( DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , bit_scale: Optional[float] = 1.0 , ):
        super().__init__()
        self.bit_scale = bit_scale
        # Bind the matching bit-space step function onto the scheduler instance
        # so that `self.scheduler.step(...)` in `__call__` uses it.
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        ).__get__(scheduler )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , height: Optional[int] = 256 , width: Optional[int] = 256 , num_inference_steps: Optional[int] = 50 , generator: Optional[torch.Generator] = None , batch_size: Optional[int] = 1 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )

        self.scheduler.set_timesteps(num_inference_steps )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample

        image = bits_to_decimal(latents )

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
| 129 | 0 |
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy( x ):
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
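# With p = softmax(x), H(p) = -sum_i p_i * log(p_i) = log(A) - B / A, since
# p_i = exp(x_i) / A and -sum_i p_i * (x_i - log(A)) = log(A) - B / A.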
class DeeBertEncoder( nn.Module ):
    """simple docstring"""

    def __init__( self , config ):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self , x ):
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler( self , pooler ):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def forward( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs )
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). " , BERT_START_DOCSTRING , )
class DeeBertModel( BertPreTrainedModel ):
    """simple docstring"""

    def __init__( self , config ):
        super().__init__(config )
        self.config = config

        self.embeddings = BertEmbeddings(config )
        self.encoder = DeeBertEncoder(config )
        self.pooler = BertPooler(config )

        self.init_weights()

    def init_highway_pooler( self ):
        self.encoder.init_highway_pooler(self.pooler )

    def get_input_embeddings( self ):
        return self.embeddings.word_embeddings

    def set_input_embeddings( self , value ):
        self.embeddings.word_embeddings = value

    def _prune_heads( self , heads_to_prune ):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('''You have to specify either input_ids or inputs_embeds''' )

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape , device=device )
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape , device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask , input_shape , device )

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )

        embedding_output = self.embeddings(
            input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
        encoder_outputs = self.encoder(
            embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output )

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here

        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Tuple ,lowercase_ : Any ,lowercase_ : int ):
lowerCAmelCase__ : List[Any] = message
lowerCAmelCase__ : Any = exit_layer # start from 1!
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase_ : Any ):
super().__init__()
lowerCAmelCase__ : Optional[int] = BertPooler(lowercase_ )
lowerCAmelCase__ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase__ : str = nn.Linear(config.hidden_size ,config.num_labels )
def __lowerCAmelCase ( self : Tuple ,lowercase_ : int ):
# Pooler
lowerCAmelCase__ : Tuple = encoder_outputs[0]
lowerCAmelCase__ : Optional[int] = self.pooler(lowercase_ )
# "return" pooler_output
# BertModel
lowerCAmelCase__ : List[str] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
lowerCAmelCase__ : List[str] = bmodel_output[1]
lowerCAmelCase__ : str = self.dropout(lowercase_ )
lowerCAmelCase__ : List[str] = self.classifier(lowercase_ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , a_ , )
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase_ : str ):
super().__init__(lowercase_ )
lowerCAmelCase__ : Union[str, Any] = config.num_labels
lowerCAmelCase__ : Optional[int] = config.num_hidden_layers
lowerCAmelCase__ : Any = DeeBertModel(lowercase_ )
lowerCAmelCase__ : str = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase__ : Union[str, Any] = nn.Linear(config.hidden_size ,self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(lowercase_ )
def __lowerCAmelCase ( self : Any ,lowercase_ : Tuple=None ,lowercase_ : Optional[int]=None ,lowercase_ : List[Any]=None ,lowercase_ : int=None ,lowercase_ : Tuple=None ,lowercase_ : Optional[Any]=None ,lowercase_ : Optional[int]=None ,lowercase_ : Dict=-1 ,lowercase_ : Dict=False ,):
lowerCAmelCase__ : Tuple = self.num_layers
try:
lowerCAmelCase__ : Dict = self.bert(
lowercase_ ,attention_mask=lowercase_ ,token_type_ids=lowercase_ ,position_ids=lowercase_ ,head_mask=lowercase_ ,inputs_embeds=lowercase_ ,)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
lowerCAmelCase__ : Any = outputs[1]
lowerCAmelCase__ : str = self.dropout(lowercase_ )
lowerCAmelCase__ : List[Any] = self.classifier(lowercase_ )
lowerCAmelCase__ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCAmelCase__ : Dict = e.message
lowerCAmelCase__ : Dict = e.exit_layer
lowerCAmelCase__ : int = outputs[0]
if not self.training:
lowerCAmelCase__ : Union[str, Any] = entropy(lowercase_ )
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Optional[int] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase__ : Optional[Any] = MSELoss()
lowerCAmelCase__ : str = loss_fct(logits.view(-1 ) ,labels.view(-1 ) )
else:
lowerCAmelCase__ : int = CrossEntropyLoss()
lowerCAmelCase__ : Union[str, Any] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
# work with highway exits
lowerCAmelCase__ : Union[str, Any] = []
for highway_exit in outputs[-1]:
lowerCAmelCase__ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase__ : int = MSELoss()
lowerCAmelCase__ : str = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) )
else:
lowerCAmelCase__ : Optional[int] = CrossEntropyLoss()
lowerCAmelCase__ : Optional[int] = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
highway_losses.append(lowercase_ )
if train_highway:
lowerCAmelCase__ : Optional[int] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCAmelCase__ : Any = (loss,) + outputs
if not self.training:
lowerCAmelCase__ : Union[str, Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCAmelCase__ : Optional[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
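# The `entropy` helper called in the forward pass above is defined elsewhere in the
# original DeeBERT module and is not part of this excerpt. A minimal sketch of the
# quantity it computes (the Shannon entropy of the softmax over the logits, used to
# judge whether an early exit is confident enough); the name `entropy_sketch` is
# ours, not the original's:
def entropy_sketch(x):
    import torch
    probs = torch.softmax(x, dim=-1)
    # a small epsilon keeps log() finite when a probability underflows to 0
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)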
| 106 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCamelCase : Tuple =_symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'''H\003'''
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    # The assignment targets below are reconstructed from the nesting order of the
    # messages in the serialized descriptor above (TrainerSpec, NormalizerSpec,
    # SelfTestData, ModelProto and their nested types).
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
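# Usage sketch (an illustration added for clarity, not part of the generated file):
# once this module is importable as `sentencepiece_model_pb2`, a serialized
# SentencePiece model can be parsed like any protobuf message. The file name below
# is hypothetical.
#
#   from sentencepiece_model_pb2 import ModelProto
#   model = ModelProto()
#   with open("tokenizer.model", "rb") as f:
#       model.ParseFromString(f.read())
#   print(model.trainer_spec.vocab_size, len(model.pieces))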
# @@protoc_insertion_point(module_scope)
 | 189 | 0 |
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Pick the weight vector closer to `sample` by squared Euclidean distance."""
        da = 0.0
        db = 0.0
        for i in range(len(sample)):
            da += math.pow((sample[i] - weights[0][i]), 2)
            db += math.pow((sample[i] - weights[1][i]), 2)
        # the cluster whose weight vector lies closer to the sample wins
        return 0 if da < db else 1

    def update(self, weights, sample, j, alpha):
        """Move the winning weight vector `j` toward the sample by learning rate `alpha`."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main():
    # Training examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f'Clusters that the test sample belongs to : {winner}')
    print(f'Weights that have been trained : {weights}')
# running the main() function
if __name__ == "__main__":
main()
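# Hand-checked example of the winner computation above (added for clarity): for the
# first training sample [1, 1, 0, 0] against the initial weights, the squared
# distances are 0.64 + 0.16 + 0.25 + 0.81 = 1.86 to cluster 0 and
# 0.04 + 0.36 + 0.49 + 0.09 = 0.98 to cluster 1, so the closer cluster 1 wins.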
| 353 |
"""simple docstring"""
import argparse
import os
import re
__A : Dict = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'''\[([^\]]+)\]''')
def A_ ( snake_case_ : Dict ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = _re_indent.search(snake_case_ )
return "" if search is None else search.groups()[0]
def A_ ( snake_case_ : Union[str, Any] ,snake_case_ : Dict="" ,snake_case_ : Dict=None ,snake_case_ : Any=None ):
'''simple docstring'''
UpperCamelCase : Optional[int] = 0
UpperCamelCase : List[Any] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(snake_case_ ):
index += 1
UpperCamelCase : Optional[Any] = ["""\n""".join(lines[:index] )]
else:
UpperCamelCase : int = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCamelCase : Any = [lines[index]]
index += 1
while index < len(snake_case_ ) and (end_prompt is None or not lines[index].startswith(snake_case_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(snake_case_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(snake_case_ ) )
if index < len(snake_case_ ) - 1:
UpperCamelCase : Any = [lines[index + 1]]
index += 1
else:
UpperCamelCase : List[str] = []
else:
blocks.append("""\n""".join(snake_case_ ) )
UpperCamelCase : int = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(snake_case_ ) > 0:
blocks.append("""\n""".join(snake_case_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(snake_case_ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def A_ ( snake_case_ : Optional[Any] ):
'''simple docstring'''
def _inner(snake_case_ : Tuple ):
return key(snake_case_ ).lower().replace("""_""" ,"""""" )
return _inner
def A_ ( snake_case_ : List[Any] ,snake_case_ : Optional[int]=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
    def noop(x):
        return x
if key is None:
UpperCamelCase : int = noop
# Constants are all uppercase, they go first.
UpperCamelCase : List[Any] = [obj for obj in objects if key(snake_case_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
UpperCamelCase : str = [obj for obj in objects if key(snake_case_ )[0].isupper() and not key(snake_case_ ).isupper()]
# Functions begin with a lowercase, they go last.
UpperCamelCase : List[str] = [obj for obj in objects if not key(snake_case_ )[0].isupper()]
UpperCamelCase : Tuple = ignore_underscore(snake_case_ )
return sorted(snake_case_ ,key=snake_case_ ) + sorted(snake_case_ ,key=snake_case_ ) + sorted(snake_case_ ,key=snake_case_ )
def A_ ( snake_case_ : int ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(snake_case_ : List[Any] ):
UpperCamelCase : Any = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
UpperCamelCase : Union[str, Any] = [part.strip().replace("""\"""" ,"""""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase : List[str] = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(snake_case_ )] ) + "]"
UpperCamelCase : str = import_statement.split("""\n""" )
if len(snake_case_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCamelCase : str = 2 if lines[1].strip() == """[""" else 1
UpperCamelCase : Dict = [(i, _re_strip_line.search(snake_case_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCamelCase : int = sort_objects(snake_case_ ,key=lambda snake_case_ : x[1] )
UpperCamelCase : Any = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(snake_case_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCamelCase : List[Any] = _re_bracket_content.sub(_replace ,lines[1] )
else:
UpperCamelCase : Optional[Any] = [part.strip().replace("""\"""" ,"""""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCamelCase : List[Any] = keys[:-1]
UpperCamelCase : int = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(snake_case_ )] )
return "\n".join(snake_case_ )
else:
# Finally we have to deal with imports fitting on one line
UpperCamelCase : List[str] = _re_bracket_content.sub(_replace ,snake_case_ )
return import_statement
def A_ ( snake_case_ : Tuple ,snake_case_ : str=True ):
'''simple docstring'''
with open(snake_case_ ,"""r""" ) as f:
UpperCamelCase : int = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCamelCase : Dict = split_code_in_indented_blocks(
snake_case_ ,start_prompt="""_import_structure = {""" ,end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 ,len(snake_case_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCamelCase : Optional[Any] = main_blocks[block_idx]
UpperCamelCase : Optional[int] = block.split("""\n""" )
# Get to the start of the imports.
UpperCamelCase : Union[str, Any] = 0
while line_idx < len(snake_case_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCamelCase : List[str] = len(snake_case_ )
else:
line_idx += 1
if line_idx >= len(snake_case_ ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCamelCase : Dict = """\n""".join(block_lines[line_idx:-1] )
UpperCamelCase : Union[str, Any] = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
UpperCamelCase : Optional[int] = split_code_in_indented_blocks(snake_case_ ,indent_level=snake_case_ )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCamelCase : Union[str, Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
UpperCamelCase : Union[str, Any] = [(pattern.search(snake_case_ ).groups()[0] if pattern.search(snake_case_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCamelCase : Optional[Any] = [(i, key) for i, key in enumerate(snake_case_ ) if key is not None]
UpperCamelCase : List[Any] = [x[0] for x in sorted(snake_case_ ,key=lambda snake_case_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
UpperCamelCase : str = 0
UpperCamelCase : List[Any] = []
for i in range(len(snake_case_ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
UpperCamelCase : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(snake_case_ )
count += 1
# And we put our main block back together with its first and last line.
UpperCamelCase : Tuple = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(snake_case_ ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(snake_case_ ,"""w""" ) as f:
f.write("""\n""".join(snake_case_ ) )
def A_ ( snake_case_ : int=True ):
'''simple docstring'''
UpperCamelCase : Any = []
for root, _, files in os.walk(snake_case_ ):
if "__init__.py" in files:
UpperCamelCase : Union[str, Any] = sort_imports(os.path.join(snake_case_ ,"""__init__.py""" ) ,check_only=snake_case_ )
if result:
UpperCamelCase : Any = [os.path.join(snake_case_ ,"""__init__.py""" )]
if len(snake_case_ ) > 0:
raise ValueError(f'Would overwrite {len(snake_case_ )} files, run `make style`.' )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__A : str = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
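# Example of the ordering `sort_objects` produces (hand-worked, added for clarity):
# given ["pipeline_utils", "DiffusionPipeline", "OnnxRuntimeModel", "VERSION"],
# all-uppercase constants sort first, capitalised classes second, lowercase
# functions/modules last, each bucket alphabetically with underscores ignored:
# ["VERSION", "DiffusionPipeline", "OnnxRuntimeModel", "pipeline_utils"].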
| 27 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : int = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class __snake_case ( a ):
UpperCAmelCase__ : Dict = '''vit'''
def __init__( self : Any , _snake_case : List[Any]=768 , _snake_case : Any=12 , _snake_case : List[Any]=12 , _snake_case : Union[str, Any]=3072 , _snake_case : Dict="gelu" , _snake_case : List[str]=0.0 , _snake_case : List[Any]=0.0 , _snake_case : Optional[Any]=0.0_2 , _snake_case : Tuple=1e-12 , _snake_case : Dict=224 , _snake_case : Tuple=16 , _snake_case : int=3 , _snake_case : List[str]=True , _snake_case : Optional[int]=16 , **_snake_case : Tuple , ):
"""simple docstring"""
super().__init__(**_snake_case)
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = encoder_stride
class __snake_case ( a ):
UpperCAmelCase__ : Optional[Any] = version.parse('''1.11''' )
@property
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
return 1e-4
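# Usage sketch (written against the original ViTConfig / ViTOnnxConfig names these
# obfuscated classes were derived from; treat the names as assumptions):
#   config = ViTConfig(image_size=384, patch_size=32)
#   onnx_config = ViTOnnxConfig(config)
#   onnx_config.inputs               # OrderedDict for "pixel_values" with batch/channel/height/width axes
#   onnx_config.atol_for_validation  # 1e-4, per the property above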
| 51 |
"""simple docstring"""
class _A :
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : int):
a : Tuple = size
a : Dict = [0] * size
a : Optional[int] = [0] * size
@staticmethod
def __snake_case ( __UpperCAmelCase : int):
return index | (index + 1)
@staticmethod
def __snake_case ( __UpperCAmelCase : int):
return (index & (index + 1)) - 1
def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int):
a : Union[str, Any] = value
while index < self.size:
a : Dict = self.get_prev(__UpperCAmelCase) + 1
if current_left_border == index:
a : Optional[int] = value
else:
a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = self.get_next(__UpperCAmelCase)
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int):
right -= 1 # Because of right is exclusive
a : List[str] = 0
while left <= right:
a : Dict = self.get_prev(__UpperCAmelCase)
if left <= current_left:
a : Optional[int] = max(__UpperCAmelCase , self.tree[right])
a : Optional[Any] = current_left
else:
a : List[str] = max(__UpperCAmelCase , self.arr[right])
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
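# Usage sketch (hand-written against the max-Fenwick-tree API this class was derived
# from; the method names are assumptions, since the obfuscation above collapsed all
# four methods to the same identifier):
#   tree = MaxFenwickTree(5)
#   tree.update(2, 10)   # point update: arr[2] = 10
#   tree.query(0, 5)     # -> 10; the right bound is exclusive, per the comment above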
| 40 | 0 |
import logging
import os
from .state import PartialState
class _UpperCamelCase ( logging.LoggerAdapter ):
@staticmethod
def lowercase ( _SCREAMING_SNAKE_CASE: List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[Any] , *_SCREAMING_SNAKE_CASE: int , **_SCREAMING_SNAKE_CASE: List[Any] ) -> Any:
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
UpperCamelCase_ = kwargs.pop("main_process_only" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = kwargs.pop("in_order" , _SCREAMING_SNAKE_CASE )
if self.isEnabledFor(_SCREAMING_SNAKE_CASE ):
if self._should_log(_SCREAMING_SNAKE_CASE ):
UpperCamelCase_ , UpperCamelCase_ = self.process(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.logger.log(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
elif in_order:
UpperCamelCase_ = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
UpperCamelCase_ , UpperCamelCase_ = self.process(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.logger.log(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
state.wait_for_everyone()
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ = None ) -> List[str]:
if log_level is None:
UpperCamelCase_ = os.environ.get("ACCELERATE_LOG_LEVEL" , UpperCamelCase_ )
UpperCamelCase_ = logging.getLogger(UpperCamelCase_ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(UpperCamelCase_ , {} )
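# Usage sketch (the helper above is accelerate's `get_logger`; an Accelerator or
# PartialState must be constructed before logging, since the adapter checks the
# shared state):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once", main_process_only=True)
#   logger.debug("printed by every rank, in order", main_process_only=False, in_order=True)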
| 328 |
def rank_of_matrix(matrix) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            # (note: reassigning the loop variable has no effect on a Python `for` loop)
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
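# Worked examples (hand-checked against the function above): an identity-like
# [[1, 0], [0, 1]] has independent rows, so the rank is 2; in [[1, 2], [2, 4]] the
# second row is twice the first, elimination zeroes it out, and the rank is 1.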
| 328 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str] =logging.get_logger(__name__)
A__ : List[Any] ={
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class UpperCAmelCase ( snake_case_ ):
_lowercase: int = '''markuplm'''
def __init__( self : Dict , __snake_case : Union[str, Any]=3_05_22 , __snake_case : Tuple=7_68 , __snake_case : Any=12 , __snake_case : List[str]=12 , __snake_case : str=30_72 , __snake_case : List[Any]="gelu" , __snake_case : List[str]=0.1 , __snake_case : List[Any]=0.1 , __snake_case : Union[str, Any]=5_12 , __snake_case : List[str]=2 , __snake_case : int=0.02 , __snake_case : List[str]=1E-1_2 , __snake_case : Optional[int]=0 , __snake_case : str=0 , __snake_case : Tuple=2 , __snake_case : Tuple=2_56 , __snake_case : int=10_24 , __snake_case : Union[str, Any]=2_16 , __snake_case : List[str]=10_01 , __snake_case : Any=32 , __snake_case : Tuple=50 , __snake_case : str="absolute" , __snake_case : Tuple=True , __snake_case : List[Any]=None , **__snake_case : Union[str, Any] , ) -> List[Any]:
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case , )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = position_embedding_type
_lowerCAmelCase = use_cache
_lowerCAmelCase = classifier_dropout
# additional properties
_lowerCAmelCase = max_depth
_lowerCAmelCase = max_xpath_tag_unit_embeddings
_lowerCAmelCase = max_xpath_subs_unit_embeddings
_lowerCAmelCase = tag_pad_id
_lowerCAmelCase = subs_pad_id
_lowerCAmelCase = xpath_unit_hidden_size
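# Usage sketch (illustrative; the class above is the obfuscated form of
# MarkupLMConfig, so the names below are assumptions):
#   config = MarkupLMConfig()
#   config.model_type                # "markuplm"
#   config.xpath_unit_hidden_size    # 32, per the defaults above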
| 70 |
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: return the longest palindromic substring in linear time."""
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ''''''
    output_string = ''''''
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # if this palindrome ends after the previously explored end (that is r),
        # update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
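# Worked example (hand-checked): for "abacab" the augmented string is
# "a|b|a|c|a|b"; the widest palindromic window is centred on 'c' (radius 5 in the
# augmented string, raw length 9), so the function returns "bacab".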
| 5 | 0 |
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase__ : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = ['''input_values''', '''attention_mask''']
def __init__( self , _UpperCAmelCase = 1 , _UpperCAmelCase = 1_6000 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = False , _UpperCAmelCase = 80 , _UpperCAmelCase = 16 , _UpperCAmelCase = 64 , _UpperCAmelCase = "hann_window" , _UpperCAmelCase = 1.0 , _UpperCAmelCase = 80 , _UpperCAmelCase = 7600 , _UpperCAmelCase = 1e-1_0 , _UpperCAmelCase = 2 , _UpperCAmelCase = True , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase)
__A : Union[str, Any] = do_normalize
__A : Dict = return_attention_mask
__A : Any = num_mel_bins
__A : Union[str, Any] = hop_length
__A : List[str] = win_length
__A : int = win_function
__A : Any = frame_signal_scale
__A : List[str] = fmin
__A : Optional[Any] = fmax
__A : Dict = mel_floor
__A : Optional[Any] = reduction_factor
__A : Optional[Any] = win_length * sampling_rate // 1000
__A : str = hop_length * sampling_rate // 1000
__A : Tuple = optimal_fft_length(self.sample_size)
__A : str = (self.n_fft // 2) + 1
__A : Dict = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase)
__A : List[str] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0.0):
'''simple docstring'''
if attention_mask is not None:
__A : List[Any] = np.array(_UpperCAmelCase , np.intaa)
__A : int = []
for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1)):
__A : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
if length < normed_slice.shape[0]:
__A : Dict = padding_value
normed_input_values.append(_UpperCAmelCase)
else:
__A : List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
return normed_input_values
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = spectrogram(
_UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.')
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
if audio is not None:
__A : Dict = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
else:
__A : Union[str, Any] = None
if audio_target is not None:
__A : Optional[int] = self._process_audio(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , )
if inputs is None:
return inputs_target
else:
__A : Any = inputs_target['input_values']
__A : Union[str, Any] = inputs_target.get('attention_mask')
if decoder_attention_mask is not None:
__A : List[str] = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = isinstance(_UpperCAmelCase , np.ndarray) and len(speech.shape) > 1
if is_batched_numpy and len(speech.shape) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}')
__A : Optional[int] = is_batched_numpy or (
isinstance(_UpperCAmelCase , (list, tuple)) and (isinstance(speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__A : Dict = [np.asarray(_UpperCAmelCase , dtype=np.floataa) for speech in speech]
elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray):
__A : List[str] = np.asarray(_UpperCAmelCase , dtype=np.floataa)
elif isinstance(_UpperCAmelCase , np.ndarray) and speech.dtype is np.dtype(np.floataa):
__A : List[Any] = speech.astype(np.floataa)
# always return batch
if not is_batched:
__A : Tuple = [speech]
# needed to make pad() work on spectrogram inputs
__A : Tuple = self.feature_size
# convert into correct format for padding
if is_target:
__A : Dict = [self._extract_mel_features(_UpperCAmelCase) for waveform in speech]
__A : Dict = BatchFeature({'input_values': features})
__A : Any = self.num_mel_bins
else:
__A : int = BatchFeature({'input_values': speech})
__A : Union[str, Any] = self.pad(
_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
__A : Any = feature_size_hack
# convert input values to correct format
__A : Union[str, Any] = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray):
__A : List[str] = [np.asarray(_UpperCAmelCase , dtype=np.floataa) for array in input_values]
elif (
not isinstance(_UpperCAmelCase , np.ndarray)
and isinstance(input_values[0] , np.ndarray)
and input_values[0].dtype is np.dtype(np.floataa)
):
__A : Any = [array.astype(np.floataa) for array in input_values]
elif isinstance(_UpperCAmelCase , np.ndarray) and input_values.dtype is np.dtype(np.floataa):
__A : List[Any] = input_values.astype(np.floataa)
# convert attention_mask to correct format
__A : int = padded_inputs.get('attention_mask')
if attention_mask is not None:
__A : Optional[int] = [np.asarray(_UpperCAmelCase , dtype=np.intaa) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__A : Optional[int] = (
attention_mask
if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase) is not PaddingStrategy.DO_NOT_PAD
else None
)
__A : str = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value)
if return_tensors is not None:
__A : Dict = padded_inputs.convert_to_tensors(_UpperCAmelCase)
return padded_inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = super().to_dict()
# Don't serialize these as they are derived from the other properties.
__A : str = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
        return output
 | 190 |
'''simple docstring'''
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
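# Hand-checked trace for the graph above (added for clarity): Kahn's algorithm
# processes vertices in topological order and relaxes long_dist, whose final value
# is [1, 1, 2, 2, 2, 3, 4, 5]; the longest chain is 0 -> 2 -> 5 -> 6 -> 7, so the
# call below prints 5 (the number of vertices on the longest path).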
longest_distance(graph)
 | 190 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_A = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
_A = 10
_A = 256
def __UpperCamelCase ( _A ):
if len(lowerCamelCase_ ) < MIN_NUM_TOKENS:
return None
lowerCAmelCase_ = MinHash(num_perm=lowerCamelCase_ )
for token in set(lowerCamelCase_ ):
min_hash.update(token.encode() )
return min_hash
def __UpperCamelCase ( _A ):
return {t for t in NON_ALPHA.split(lowerCamelCase_ ) if len(t.strip() ) > 0}
class A :
def __init__( self, *,
UpperCamelCase__ = 0.85, ):
"""simple docstring"""
lowerCAmelCase_ = duplication_jaccard_threshold
lowerCAmelCase_ = NUM_PERM
lowerCAmelCase_ = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm )
lowerCAmelCase_ = defaultdict(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self._index.query(__lowerCamelCase )
if code_key in self._index.keys:
print(f"Duplicate key {code_key}" )
return
self._index.insert(__lowerCamelCase, __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(__lowerCamelCase )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = []
for base, duplicates in self._duplicate_clusters.items():
lowerCAmelCase_ = [base] + list(__lowerCamelCase )
# reformat the cluster to be a list of dict
lowerCAmelCase_ = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(__lowerCamelCase )
return duplicate_clusters
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.get_duplicate_clusters()
with open(__lowerCamelCase, '''w''' ) as f:
json.dump(__lowerCamelCase, __lowerCamelCase )
def __UpperCamelCase ( element ):
    index, data = element
lowerCAmelCase_ = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def __UpperCamelCase ( _A ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowerCamelCase_ , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def __UpperCamelCase ( dataset_iterator , jaccard_threshold ):
lowerCAmelCase_ = DuplicationIndex(duplication_jaccard_threshold=lowerCamelCase_ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCamelCase_ ) ) , max_queue_size=100 ) ):
di.add(lowerCamelCase_ , lowerCamelCase_ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def __UpperCamelCase ( code_a , code_b ):
    tokensa = get_tokens(code_a)
    tokensb = get_tokens(code_b)
    return len(tokensa & tokensb) / len(tokensa | tokensb)
_shared_dataset = None


def __UpperCamelCase ( cluster , jaccard_threshold ):
    extremes = []
    for elementa in cluster:
        code_a = _shared_dataset[elementa["base_index"]]["content"]
        for elementb in extremes:
            code_b = _shared_dataset[elementb["base_index"]]["content"]
            if jaccard_similarity(code_a, code_b) >= jaccard_threshold:
                # the stored extreme absorbs this near-duplicate
                elementb["copies"] += 1
                break
        else:
            # no stored extreme is close enough: keep this element as a new extreme
            elementa["copies"] = 1
            extremes.append(elementa)
return extremes
def __UpperCamelCase ( cluster_list , dataset , jaccard_threshold ):
global _shared_dataset
    _shared_dataset = dataset
lowerCAmelCase_ = []
lowerCAmelCase_ = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCamelCase_ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowerCamelCase_ , lowerCamelCase_ , ) , total=len(lowerCamelCase_ ) , ):
extremes_list.append(lowerCamelCase_ )
return extremes_list
def __UpperCamelCase ( dataset , jaccard_threshold = 0.85 ):
lowerCAmelCase_ = make_duplicate_clusters(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase_ = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
lowerCAmelCase_ = {}
lowerCAmelCase_ = find_extremes(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for extremes in extremes_clusters:
for element in extremes:
lowerCAmelCase_ = element
lowerCAmelCase_ = duplicate_indices - set(extreme_dict.keys() )
    lowerCAmelCase_ = dataset.filter(lambda _A , idx : idx not in remove_indices , with_indices=lowerCamelCase_ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
lowerCAmelCase_ = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
lowerCAmelCase_ = extreme_dict[element['''base_index''']]['''copies''']
print(f"Original dataset size: {len(lowerCamelCase_ )}" )
print(f"Number of duplicate clusters: {len(lowerCamelCase_ )}" )
print(f"Files in duplicate cluster: {len(lowerCamelCase_ )}" )
print(f"Unique files in duplicate cluster: {len(lowerCamelCase_ )}" )
print(f"Filtered dataset size: {len(lowerCamelCase_ )}" )
return ds_filter, duplicate_clusters
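# Minimal, self-contained MinHash illustration (a sketch added for clarity; it
# mirrors what the pipeline above does when estimating similarity between two
# files with datasketch):
def _demo_minhash_similarity(code_a: str, code_b: str) -> float:
    import re

    from datasketch import MinHash

    non_alpha = re.compile(r"[^A-Za-z_0-9]")
    ma, mb = MinHash(num_perm=256), MinHash(num_perm=256)
    for token in {t for t in non_alpha.split(code_a) if t.strip()}:
        ma.update(token.encode())
    for token in {t for t in non_alpha.split(code_b) if t.strip()}:
        mb.update(token.encode())
    # jaccard() estimates |A ∩ B| / |A ∪ B| over the two token sets
    return ma.jaccard(mb)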
| 278 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__snake_case : int =logging.get_logger(__name__)
enable_full_determinism()
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =UNetaDModel
snake_case_ ="""sample"""
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[str] = 4
lowerCAmelCase__ : List[str] = 3
lowerCAmelCase__ : Any = (32, 32)
lowerCAmelCase__ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = torch.tensor([10] ).to(__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : str = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
lowerCAmelCase__ : List[str] = self.dummy_input
return init_dict, inputs_dict
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =UNetaDModel
snake_case_ ="""sample"""
@property
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : str = 4
lowerCAmelCase__ : Optional[int] = 4
lowerCAmelCase__ : Optional[Any] = (32, 32)
lowerCAmelCase__ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor([10] ).to(__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
return (4, 32, 32)
@property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
return (4, 32, 32)
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
lowerCAmelCase__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' ,'''This test is supposed to run on GPU''' )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' ,'''This test is supposed to run on GPU''' )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase )
model_accelerate.to(__lowerCamelCase )
model_accelerate.eval()
lowerCAmelCase__ : Union[str, Any] = torch.randn(
1 ,model_accelerate.config.in_channels ,model_accelerate.config.sample_size ,model_accelerate.config.sample_size ,generator=torch.manual_seed(0 ) ,)
lowerCAmelCase__ : Dict = noise.to(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = model_accelerate(__lowerCamelCase ,__lowerCamelCase )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' ,output_loading_info=__lowerCamelCase ,low_cpu_mem_usage=__lowerCamelCase )
model_normal_load.to(__lowerCamelCase )
model_normal_load.eval()
lowerCAmelCase__ : List[Any] = model_normal_load(__lowerCamelCase ,__lowerCamelCase )['''sample''']
assert torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-3 )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : List[str] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
lowerCAmelCase__ : str = noise.to(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = torch.tensor([10] * noise.shape[0] ).to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : str = model(__lowerCamelCase ,__lowerCamelCase ).sample
lowerCAmelCase__ : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCAmelCase__ : str = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-3 ) )
class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase):
'''simple docstring'''
snake_case_ =UNetaDModel
snake_case_ ="""sample"""
@property
def lowerCAmelCase__ (self ,__lowerCamelCase=(32, 32) ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : str = 4
lowerCAmelCase__ : Optional[int] = 3
lowerCAmelCase__ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa ,device=__lowerCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
return (3, 32, 32)
@property
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
return (3, 32, 32)
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Tuple = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1e-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
lowerCAmelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' ,output_loading_info=__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) ,0 )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = self.dummy_input
lowerCAmelCase__ : Tuple = floats_tensor((4, 3) + (2_56, 2_56) ).to(__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = noise
lowerCAmelCase__ : Union[str, Any] = model(**__lowerCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Dict = 4
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : List[Any] = (2_56, 2_56)
lowerCAmelCase__ : str = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = torch.tensor(batch_size * [1e-4] ).to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(__lowerCamelCase ,__lowerCamelCase ).sample
lowerCAmelCase__ : Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase__ : Optional[Any] = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-2 ) )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : int = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = 4
lowerCAmelCase__ : Dict = 3
lowerCAmelCase__ : str = (32, 32)
lowerCAmelCase__ : Tuple = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = torch.tensor(batch_size * [1e-4] ).to(__lowerCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(__lowerCamelCase ,__lowerCamelCase ).sample
lowerCAmelCase__ : List[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowerCAmelCase__ : Union[str, Any] = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-2 ) )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
pass
| 129 | 0 |
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class A_ :
def __init__(self :List[Any] , _UpperCamelCase :int )-> Optional[int]:
if isinstance(_UpperCamelCase , _UpperCamelCase ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
__A = deepcopy(_UpperCamelCase )
elif os.path.exists(_UpperCamelCase ):
with io.open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f:
__A = json.load(_UpperCamelCase )
else:
try:
                __A = base64.urlsafe_b64decode(_UpperCamelCase ).decode('''utf-8''' )
__A = json.loads(_UpperCamelCase )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
__A = config
self.set_stage_and_offload()
def _lowerCAmelCase (self :int )-> Optional[Any]:
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
__A = self.get_value('''zero_optimization.stage''' , -1 )
# offload
__A = False
if self.is_zeroa() or self.is_zeroa():
__A = set(['''cpu''', '''nvme'''] )
__A = set(
[
self.get_value('''zero_optimization.offload_optimizer.device''' ),
self.get_value('''zero_optimization.offload_param.device''' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
__A = True
    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split('.')
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split('.')
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        # device placement and grad scaling are handled by the DeepSpeed engine itself
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, 'overflow')

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
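
# Minimal usage sketch (added for illustration; not part of the original module):
# exercising the dotted-key lookup that `get_value` implements on a hand-written
# DeepSpeed-style config dictionary.
if __name__ == "__main__":
    _demo_cfg = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
    assert _demo_cfg.get_value("zero_optimization.stage") == 3
    assert _demo_cfg.is_zero3() and _demo_cfg.is_offload()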
| 250 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'openbmb/cpm-ant-10b': 1024,
}
def load_vocab(vocab_file) -> collections.OrderedDict:
    """Loads a vocabulary file into an ordered dictionary mapping token -> index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab
class WordpieceTokenizer:
    """Runs greedy longest-match-first tokenization against a given vocabulary."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # try the longest substring first, then shrink from the right
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token='<d>', eod_token='</d>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<unk>', line_token='</n>', space_token='</_>', padding_side='left', **kwargs):
        requires_backends(self, ['jieba'])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs)
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
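
# Illustrative sketch (added; not part of the original module): the greedy longest-match
# loop in WordpieceTokenizer.tokenize always prefers the longest vocabulary entry starting
# at the current position. With a toy vocabulary:
if __name__ == "__main__":
    toy_vocab = {"un": 0, "unhappi": 1, "happiness": 2, "ness": 3}
    demo_tokenizer = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
    print(demo_tokenizer.tokenize("unhappiness"))  # ['unhappi', 'ness'] - longest match wins over 'un'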
| 250 | 1 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value('bool'), type=Value('int64')))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value('int32')))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(['foo', 'bar'], type=Value('int64')))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value('int32')))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(['foo', 'bar'], try_type=Value('int64')))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), 'int64')))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), 'int64'))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(['foo', 'bar'], type=Array2D((1, 3), 'int64')))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), 'int64')))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), 'int64'))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(['foo', 'bar'], try_type=Array2D((1, 3), 'int64')))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            'datasets.arrow_writer.cast_to_python_objects', side_effect=cast_to_python_objects) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{'path': None, 'bytes': b'image_bytes'}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn('optimize_list_casting', kwargs)
            self.assertFalse(kwargs['optimize_list_casting'])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10])
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}])
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1})
        writer.write({'col_1': 'bar', 'col_2': 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({'labels': ClassLabel(names=['neg', 'pos'])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({'labels': 0})
        writer.write({'labels': 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt='split_name', check_duplicates=True, ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({'col_1': 'foo', 'col_2': 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize('writer_batch_size', [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt='split_name', check_duplicates=True, ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({'col_1': 'foo', 'col_2': 1}, key=10)
            writer.write({'col_1': 'bar', 'col_2': 2}, key=10)
            num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize('writer_batch_size', [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt='split_name', check_duplicates=True, ) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1}, key=1)
        writer.write({'col_1': 'bar', 'col_2': 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize('writer_batch_size', [None, 1, 10])
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}])
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]})
        writer.write_batch({'col_1': [], 'col_2': []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize('writer_batch_size', [None, 1, 10])
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}])
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({'col_1': ['foo', 'bar'], 'col_2': [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize('writer_batch_size', [None, 1, 10])
@pytest.mark.parametrize(
    'fields', [None, {'col_1': pa.string(), 'col_2': pa.int64()}, {'col_1': pa.string(), 'col_2': pa.int32()}])
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({'col_1': ['foo'], 'col_2': [1]}))
        writer.write_row(pa.Table.from_pydict({'col_1': ['bar'], 'col_2': [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {'col_1': pa.string(), 'col_2': pa.int64()}
        output = os.path.join(tmp_dir, 'test.arrow')
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({'col_1': ['foo', 'bar'], 'col_2': [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize('optimized_int_type, expected_dtype', [(None, pa.int64()), (Value('int32'), pa.int32())])
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    'col, expected_dtype', [
        ('attention_mask', pa.int8()),
        ('special_tokens_mask', pa.int8()),
        ('token_type_ids', pa.int8()),
        ('input_ids', pa.int32()),
        ('other', pa.int64()),
    ], )
@pytest.mark.parametrize('sequence', [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize('raise_exception', [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / 'dataset-train.arrow')
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = 'mock://dataset-train.arrow'
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({'col_1': 'foo', 'col_2': 1})
        writer.write({'col_1': 'bar', 'col_2': 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({'col_1': 'foo', 'col_2': 1})
        writer.write({'col_1': 'bar', 'col_2': 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize('embed_local_files', [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / 'test_image_rgb.jpg')
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format='png')
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({'image': Image()}), embed_local_files=embed_local_files) as writer:
        writer.write({'image': image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out['image'][0]['path'], str)
        with open(image_path, 'rb') as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field('col_1', pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field('col_1', pa.string())])
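
# Illustrative sketch (added; not part of the original tests): the in-memory round trip
# the assertions above rely on, using only pyarrow - write a table to a buffer in the IPC
# stream format, then read it back.
def _ipc_round_trip_demo():
    table = pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]})
    sink = pa.BufferOutputStream()
    with pa.ipc.new_stream(sink, table.schema) as ipc_writer:
        ipc_writer.write_table(table)
    restored = pa.ipc.open_stream(pa.BufferReader(sink.getvalue())).read_all()
    assert restored.to_pydict() == table.to_pydict()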
| 175 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / 'file.csv'
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20
        ''')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / 'malformed_file.csv'
    data = textwrap.dedent(
        '''\
        header1,header2
        1,2
        10,20,
        ''')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / 'csv_with_image.csv'
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """)
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / 'csv_with_label.csv'
    data = textwrap.dedent(
        '''\
        label
        good
        bad
        good
        ''')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / 'csv_with_int_list.csv'
    data = textwrap.dedent(
        '''\
        int_list
        1 2 3
        4 5 6
        7 8 9
        ''')
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match='Error tokenizing data'):
        for _ in generator:
            pass
    assert any(
        record.levelname == 'ERROR'
        and 'Failed to read file' in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records)
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding='utf-8') as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='utf-8', features=Features({'image': Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('image').type == Image()()
    generated_content = pa_table.to_pydict()['image']
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding='utf-8') as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='utf-8', features=Features({'label': ClassLabel(names=['good', 'bad'])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('label').type == ClassLabel(names=['good', 'bad'])()
    generated_content = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad']).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding='utf-8', sep=',', converters={'int_list': lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field('int_list').type)
    generated_content = pa_table.to_pydict()['int_list']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
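
# Illustrative sketch (added; not part of the original tests): what the fixtures above feed
# the loader - a dedented CSV string written to disk, read back here with pyarrow's own CSV
# reader as a dependency-light check.
def _csv_fixture_demo(tmp_path):
    import pyarrow.csv

    path = tmp_path / 'demo.csv'
    path.write_text(textwrap.dedent('''\
        header1,header2
        1,2
        10,20
        '''))
    return pyarrow.csv.read_csv(str(path))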
| 27 | 0 |
import requests

# Base URL of the (legacy v1) BBC News endpoint; the API key is appended at request time.
_NEWS_API = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetch the top stories and print one numbered headline per line
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['articles'], 1):
        print(f'{i}.) {article["title"]}')


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 353 |
ROMAN = [
(1_0_0_0, 'M'),
(9_0_0, 'CM'),
(5_0_0, 'D'),
(4_0_0, 'CD'),
(1_0_0, 'C'),
(9_0, 'XC'),
(5_0, 'L'),
(4_0, 'XL'),
(1_0, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer."""
    vals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one means subtraction (e.g. IV = 4)
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral string."""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return ''.join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
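
# Illustrative usage (added; not part of the original module):
assert roman_to_int('MMXXIV') == 2024
assert int_to_roman(2024) == 'MMXXIV'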
| 146 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ImageTextProcessor(ProcessorMixin):
    """Pairs an auto image processor with an auto tokenizer into a single processor.

    Note: the original class name was lost in extraction; `ImageTextProcessor` is a
    reconstructed, descriptive name.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
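
# Illustrative usage sketch (added; not part of the original module). `MyImageProcessor`
# and `MyTokenizer` are hypothetical stand-ins for whatever concrete classes the processor
# is instantiated with:
#
#     processor = ImageTextProcessor(image_processor=MyImageProcessor(), tokenizer=MyTokenizer())
#     batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#     # batch now carries input_ids, attention_mask and pixel_values together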
| 328 |
import math
def perfect_square(num: int) -> bool:
    """Check for a perfect square via floating-point sqrt (can lose precision for huge numbers)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check for a perfect square with an exact integer binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
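
# Illustrative usage (added; not part of the original module): the search narrows
# [left, right] until mid**2 brackets n exactly, avoiding float rounding entirely.
assert perfect_square_binary_search(49) is True   # 7 * 7
assert perfect_square_binary_search(50) is False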
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 1 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})')
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], f'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # the dataset name is encoded in the checkpoint's parent directory
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f'summarization_{dataset}']["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f'summarization_{dataset}']
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
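
# Illustrative check (added; not part of the original script): PATTERNS is applied
# left-to-right by rename_state_dict_key, rewriting a TF name piecewise - "/" -> ".",
# then "r.layer_" -> "r.layers.", then "ffn.dense." -> "fc1.", then "kernel" -> "weight".
assert rename_state_dict_key("encoder/layer_0/ffn/dense/kernel") == "encoder.layers.0.fc1.weight"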
| 364 |
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
            f' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ",
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, )
        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    # later (deeper) layers get a larger weight in the combined loss
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
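
# Illustrative sketch (added; not part of the original module): the patience-based early
# exit in BertModelWithPabee.forward stops as soon as `patience` consecutive layers agree
# on the prediction. Distilled to plain Python over a list of per-layer predicted labels:
def _patience_early_exit_demo(per_layer_predictions, patience):
    patient_counter = 0
    patient_result = None
    for layer_idx, pred in enumerate(per_layer_predictions):
        if patient_result is not None and pred == patient_result:
            patient_counter += 1
        else:
            patient_counter = 0
        patient_result = pred
        if patient_counter == patience:
            return layer_idx  # exit here instead of running the remaining layers
    return len(per_layer_predictions) - 1


assert _patience_early_exit_demo([0, 1, 1, 1, 1, 1], patience=3) == 4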
| 313 | 0 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                }),
            reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
 | 190 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
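
# Illustrative sketch (added; not part of the original module): the classical truth table
# the circuit above reproduces - sum and carry-out of a one-bit full adder.
def classical_full_adder(a: int, b: int, carry_in: int) -> tuple:
    total = a + b + carry_in
    return total % 2, total // 2  # (sum bit, carry-out bit)


assert classical_full_adder(1, 1, 1) == (1, 1)  # both measured bits are 1 for inputs 1, 1, 1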
if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
 | 190 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    # strip the surrounding quotes from each comma-separated word
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
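
# Illustrative check (added; not part of the original solution): the word value is the sum
# of letter positions (ord(letter) - 64 for uppercase). "SKY" -> 19 + 11 + 25 = 55, the
# 10th triangular number, so "SKY" counts as a triangle word.
assert sum(ord(x) - 64 for x in "SKY") == 55 and 55 in TRIANGULAR_NUMBERS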
| 111 |
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave two strings character by character; leftover characters are appended."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    max_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(max_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
| 111 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models")
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir")
        download_parser.add_argument(
            "--trust-remote-code", action="store_true", help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine")
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
| 250 |
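The command class above relies on argparse sub-parsers plus set_defaults(func=...) dispatch; below is a minimal standalone sketch of that wiring (the command and model names are illustrative, not the transformers CLI itself).

import argparse

parser = argparse.ArgumentParser(prog="cli")
subparsers = parser.add_subparsers()
download = subparsers.add_parser("download")
download.add_argument("model")
download.add_argument("--force", action="store_true")
# set_defaults attaches the handler that parse_args later exposes as args.func
download.set_defaults(func=lambda args: print(f"would fetch {args.model}"))

args = parser.parse_args(["download", "bert-base-uncased"])
args.func(args)  # dispatch to the handler chosen by set_defaults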
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case ) -> float:
_lowercase : Optional[Any] = 0.00
_lowercase : Dict = 0
for resistor in resistors:
if resistor <= 0:
_lowercase : Union[str, Any] = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(snake_case )
first_sum += 1 / float(snake_case )
index += 1
return 1 / first_sum
def _A ( snake_case ) -> float:
_lowercase : Dict = 0.00
_lowercase : List[str] = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_lowercase : Dict = F'''Resistor at index {index} has a negative value!'''
raise ValueError(snake_case )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250 | 1 |
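A quick usage sketch for the two helpers above (resistor values are illustrative): parallel resistance is the reciprocal of the summed reciprocals, series resistance is a plain sum.

resistors = [3.21389, 2.0, 3.0]
parallel = 1 / sum(1 / r for r in resistors)   # ~0.8738 ohms
series = sum(resistors)                        # 8.21389 ohms
print(f"parallel={parallel:.4f}, series={series:.4f}")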
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Tuple = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
_lowercase : Optional[int] = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def lowerCamelCase__ ( A : Dict , A : str ):
'''simple docstring'''
UpperCAmelCase = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
UpperCAmelCase = int(re.match(R'''.*layer_(\d*).*''' , A )[1] )
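    # The checkpoint numbers the embedding/norm layers before the transformer blocks, hence the fixed offset (assumed from the BLOOM Megatron layout).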
layer_number -= 3
return f"""h.{layer_number}.""" + key
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
UpperCAmelCase = re.search(R'''[^\d](\d+)$''' , str(A ) )
if bit_search is None:
raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" )
UpperCAmelCase = int(bit_search.groups()[0] )
return bit_size // 8
def lowerCamelCase__ ( A : Any , A : Tuple , A : Dict , A : Optional[Any] , A : Union[str, Any] ):
'''simple docstring'''
if bloom_config_file == "":
UpperCAmelCase = BloomConfig()
else:
UpperCAmelCase = BloomConfig.from_json_file(A )
if shard_model:
UpperCAmelCase = os.listdir(A )
        UpperCAmelCase = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , A ) )
UpperCAmelCase = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase = 0
UpperCAmelCase = None
UpperCAmelCase = BloomConfig()
for j, file in enumerate(A ):
print('''Processing file: {}'''.format(A ) )
UpperCAmelCase = None
for i in range(A ):
# load all TP files
UpperCAmelCase = file.replace('''model_00''' , f"""model_0{i}""" )
UpperCAmelCase = torch.load(os.path.join(A , A ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase = list(temp.keys() )
for key in keys:
UpperCAmelCase = temp.pop(A )
if tensors is None:
UpperCAmelCase = temp
else:
for key in tensors.keys():
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
UpperCAmelCase = torch.cat([tensors[key], temp[key]] , dim=A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase = tensors[key] / pretraining_tp
torch.save(
A , os.path.join(
A , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(A ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(A ) ).zfill(5 ) )
UpperCAmelCase = BloomConfig()
UpperCAmelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase = total_size
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(A , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase = json.dumps(A , indent=2 , sort_keys=A ) + '''\n'''
f.write(A )
else:
UpperCAmelCase = BloomModel(A )
UpperCAmelCase = os.listdir(A )
        UpperCAmelCase = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , A ) )
UpperCAmelCase = None
for i, file in enumerate(A ):
UpperCAmelCase = None
for i in range(A ):
# load all TP files
UpperCAmelCase = file.replace('''model_00''' , f"""model_0{i}""" )
UpperCAmelCase = torch.load(os.path.join(A , A ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase = list(temp.keys() )
for key in keys:
UpperCAmelCase = temp.pop(A )
if tensors is None:
UpperCAmelCase = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
UpperCAmelCase = torch.cat([tensors[key], temp[key]] , dim=A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase = tensors[key] / pretraining_tp
UpperCAmelCase = model.load_state_dict(A , strict=A )
assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
UpperCAmelCase = set(other_keys.missing_keys )
else:
UpperCAmelCase = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(A , exist_ok=A )
UpperCAmelCase = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
UpperCAmelCase = model.to(config.torch_dtype )
torch.save(model.state_dict() , A )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
_lowercase : List[str] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 360 |
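The merge rule at the heart of the converter above, reduced to a sketch with illustrative shapes: layer-norm/bias-like keys are averaged over tensor-parallel shards, while linear weights are concatenated along the row- or column-parallel dimension.

import torch

shards = [torch.ones(2, 4), torch.ones(2, 4) * 3.0]   # two TP ranks

averaged = sum(shards) / len(shards)        # e.g. ln_f.weight -> all entries 2.0
row_parallel = torch.cat(shards, dim=1)     # e.g. mlp.dense_4h_to_h.weight
col_parallel = torch.cat(shards, dim=0)     # column-parallel weights

assert averaged.mean().item() == 2.0
assert row_parallel.shape == (2, 8) and col_parallel.shape == (4, 4)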
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class UpperCamelCase__:
def __init__( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : int="resnet50" , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Union[str, Any]=32 , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=True , lowerCAmelCase : str=True , )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = out_indices if out_indices is not None else [4]
UpperCAmelCase = stage_names
UpperCAmelCase = out_features
UpperCAmelCase = backbone
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = use_pretrained_backbone
UpperCAmelCase = is_training
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def a__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Dict )-> Tuple:
"""simple docstring"""
UpperCAmelCase = TimmBackbone(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : List[str] = (TimmBackbone,) if is_torch_available() else ()
__magic_name__ : Any = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
__magic_name__ : Union[str, Any] = False
__magic_name__ : int = False
__magic_name__ : Tuple = False
__magic_name__ : List[str] = False
def a__( self : int )-> str:
"""simple docstring"""
UpperCAmelCase = TimmBackboneModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : List[str] )-> Dict:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = '''resnet18'''
UpperCAmelCase = '''microsoft/resnet-18'''
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase , use_timm_backbone=lowerCAmelCase )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase , use_timm_backbone=lowerCAmelCase , out_indices=[1, 2, 3] )
UpperCAmelCase = AutoBackbone.from_pretrained(lowerCAmelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def a__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def a__( self : Dict )-> Dict:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def a__( self : Tuple )-> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a__( self : List[str] )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a__( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a__( self : str )-> str:
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a__( self : Any )-> int:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a__( self : List[Any] )-> int:
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def a__( self : List[str] )-> Dict:
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def a__( self : str )-> Tuple:
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def a__( self : List[Any] )-> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a__( self : int )-> Tuple:
"""simple docstring"""
pass
def a__( self : Dict )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
UpperCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
UpperCAmelCase = self.all_model_classes[0]
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model(**lowerCAmelCase )
UpperCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
UpperCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
UpperCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCAmelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def a__( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
UpperCAmelCase = copy.deepcopy(lowerCAmelCase )
UpperCAmelCase = None
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
UpperCAmelCase = copy.deepcopy(lowerCAmelCase )
UpperCAmelCase = False
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(**lowerCAmelCase )
| 91 | 0 |
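The out_indices semantics exercised by the tests above, reduced to a pure-Python sketch (stage names are illustrative): negative indices count from the end of the stage list, mirroring the (-1,) timm default.

stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]

def resolve_out_features(out_indices, names):
    # Negative indices select from the end of the stage list.
    return [names[i] for i in out_indices]

assert resolve_out_features((-1,), stage_names) == ["stage4"]
assert resolve_out_features([1, 2, 3], stage_names) == ["stage1", "stage2", "stage3"]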
'''simple docstring'''
import math
def UpperCAmelCase_ ( __lowercase : int ) -> bool:
'''simple docstring'''
    return math.sqrt(__lowercase ) * math.sqrt(__lowercase ) == __lowercase
def UpperCAmelCase_ ( __lowercase : int ) -> bool:
'''simple docstring'''
_UpperCAmelCase = 0
    _UpperCAmelCase = __lowercase
    while left <= right:
        _UpperCAmelCase = (left + right) // 2
        if mid**2 == __lowercase:
            return True
        elif mid**2 > __lowercase:
_UpperCAmelCase = mid - 1
else:
_UpperCAmelCase = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 |
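Unlike the sqrt-based test, the binary-search variant above stays exact for arbitrarily large integers; a quick self-contained check (a sketch, not a dataset row):

def is_perfect_square(n: int) -> bool:
    # Integer binary search over candidate roots; no floating point involved.
    left, right = 0, n
    while left <= right:
        mid = (left + right) // 2
        if mid * mid == n:
            return True
        if mid * mid > n:
            right = mid - 1
        else:
            left = mid + 1
    return False

assert is_perfect_square(10**30) and not is_perfect_square(10**30 + 1)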
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __magic_name__ ( __lowerCAmelCase):
A: torch.FloatTensor
class __magic_name__ ( __lowerCAmelCase , __lowerCAmelCase):
@register_to_config
def __init__( self : Union[str, Any] , lowerCamelCase__ : int = 32 , lowerCamelCase__ : int = 64 , lowerCamelCase__ : int = 20 , lowerCamelCase__ : int = 768 , lowerCamelCase__ : Optional[Any]=77 , lowerCamelCase__ : Optional[int]=4 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : str = "silu" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = "linear" , lowerCamelCase__ : Optional[str] = "prd" , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
UpperCamelCase__ : List[Any] = num_attention_heads
UpperCamelCase__ : Optional[Any] = attention_head_dim
UpperCamelCase__ : List[str] = num_attention_heads * attention_head_dim
UpperCamelCase__ : Union[str, Any] = additional_embeddings
UpperCamelCase__ : Union[str, Any] = time_embed_dim or inner_dim
UpperCamelCase__ : int = embedding_proj_dim or embedding_dim
UpperCamelCase__ : Optional[Any] = clip_embed_dim or embedding_dim
UpperCamelCase__ : Dict = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 )
UpperCamelCase__ : List[Any] = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if embedding_proj_norm_type is None:
UpperCamelCase__ : int = None
elif embedding_proj_norm_type == "layer":
UpperCamelCase__ : Optional[int] = nn.LayerNorm(lowerCamelCase__ )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
UpperCamelCase__ : Dict = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if encoder_hid_proj_type is None:
UpperCamelCase__ : List[Any] = None
elif encoder_hid_proj_type == "linear":
UpperCamelCase__ : List[str] = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
UpperCamelCase__ : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) )
if added_emb_type == "prd":
UpperCamelCase__ : Any = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) )
elif added_emb_type is None:
UpperCamelCase__ : Union[str, Any] = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
UpperCamelCase__ : str = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='''gelu''' , attention_bias=lowerCamelCase__ , )
for d in range(lowerCamelCase__ )
] )
if norm_in_type == "layer":
UpperCamelCase__ : int = nn.LayerNorm(lowerCamelCase__ )
elif norm_in_type is None:
UpperCamelCase__ : int = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
UpperCamelCase__ : Optional[Any] = nn.LayerNorm(lowerCamelCase__ )
UpperCamelCase__ : List[str] = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Dict = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
causal_attention_mask.triu_(1 )
UpperCamelCase__ : Union[str, Any] = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , lowerCamelCase__ , persistent=lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
UpperCamelCase__ : Optional[int] = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = {}
def fn_recursive_add_processors(lowerCamelCase__ : str , lowerCamelCase__ : torch.nn.Module , lowerCamelCase__ : Dict[str, AttentionProcessor] ):
if hasattr(lowerCamelCase__ , '''set_processor''' ):
UpperCamelCase__ : Optional[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase__ , lowerCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return processors
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Tuple = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(lowerCamelCase__ : str , lowerCamelCase__ : torch.nn.Module , lowerCamelCase__ : Dict ):
if hasattr(lowerCamelCase__ , '''set_processor''' ):
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
module.set_processor(lowerCamelCase__ )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase__ , lowerCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCAmelCase__ ( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Union[torch.Tensor, float, int] , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : Optional[torch.FloatTensor] = None , lowerCamelCase__ : Optional[torch.BoolTensor] = None , lowerCamelCase__ : bool = True , ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = hidden_states.shape[0]
UpperCamelCase__ : List[Any] = timestep
if not torch.is_tensor(lowerCamelCase__ ):
UpperCamelCase__ : List[str] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
UpperCamelCase__ : Optional[int] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCamelCase__ : List[str] = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device )
UpperCamelCase__ : int = self.time_proj(lowerCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCamelCase__ : List[str] = timesteps_projected.to(dtype=self.dtype )
UpperCamelCase__ : Optional[Any] = self.time_embedding(lowerCamelCase__ )
if self.embedding_proj_norm is not None:
UpperCamelCase__ : Dict = self.embedding_proj_norm(lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = self.embedding_proj(lowerCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCamelCase__ : int = self.encoder_hidden_states_proj(lowerCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
UpperCamelCase__ : Optional[int] = self.proj_in(lowerCamelCase__ )
UpperCamelCase__ : int = self.positional_embedding.to(hidden_states.dtype )
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCamelCase__ : Tuple = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCamelCase__ : Dict = hidden_states[:, None, :]
UpperCamelCase__ : Optional[int] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCamelCase__ : int = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 )
additional_embeds.append(lowerCamelCase__ )
UpperCamelCase__ : Dict = torch.cat(
lowerCamelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
UpperCamelCase__ : Dict = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCamelCase__ : List[str] = F.pad(
lowerCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCamelCase__ : int = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCamelCase__ : Union[str, Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
UpperCamelCase__ : Any = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 )
UpperCamelCase__ : Optional[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCamelCase__ : List[str] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCamelCase__ : List[str] = self.norm_in(lowerCamelCase__ )
for block in self.transformer_blocks:
UpperCamelCase__ : Any = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = self.norm_out(lowerCamelCase__ )
if self.prd_embedding is not None:
UpperCamelCase__ : Optional[int] = hidden_states[:, -1]
else:
UpperCamelCase__ : int = hidden_states[:, additional_embeddings_len:]
UpperCamelCase__ : List[str] = self.proj_to_clip_embeddings(lowerCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Tuple = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 146 | 0 |
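The additive causal mask registered as a buffer in __init__ above follows a standard pattern; an isolated sketch with a small sequence length:

import torch

seq_len = 4
mask = torch.full((seq_len, seq_len), -10000.0)
mask.triu_(1)            # keep -10000.0 strictly above the diagonal, zero elsewhere
mask = mask[None, ...]   # broadcastable batch dimension

# Future positions are suppressed by the large negative bias, past/self positions pass through.
assert mask[0, 0, 1].item() == -10000.0 and mask[0, 1, 0].item() == 0.0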
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a : Any = False
a : Dict = logging.get_logger(__name__)
a : Optional[int] = 'ybelkada/fonts'
def lowerCAmelCase_ ():
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def lowerCAmelCase_ (lowerCAmelCase__: Optional[Any] , lowerCAmelCase__: Optional[Any] , lowerCAmelCase__: List[Any] ):
"""simple docstring"""
requires_backends(a__ , ["""torch"""] )
_check_torch_version()
UpperCAmelCase_: Optional[Any] = image_tensor.unsqueeze(0 )
UpperCAmelCase_: int = torch.nn.functional.unfold(a__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
UpperCAmelCase_: int = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , a__ , a__ , -1 )
UpperCAmelCase_: List[Any] = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def lowerCAmelCase_ (lowerCAmelCase__: str , lowerCAmelCase__: int = 3_6 , lowerCAmelCase__: str = "black" , lowerCAmelCase__: str = "white" , lowerCAmelCase__: int = 5 , lowerCAmelCase__: int = 5 , lowerCAmelCase__: int = 5 , lowerCAmelCase__: int = 5 , lowerCAmelCase__: Optional[bytes] = None , lowerCAmelCase__: Optional[str] = None , ):
"""simple docstring"""
requires_backends(a__ , """vision""" )
# Add new lines so that each line is no more than 80 characters.
UpperCAmelCase_: Optional[Any] = textwrap.TextWrapper(width=8_0 )
UpperCAmelCase_: Optional[Any] = wrapper.wrap(text=a__ )
UpperCAmelCase_: Optional[Any] = """\n""".join(a__ )
if font_bytes is not None and font_path is None:
UpperCAmelCase_: Optional[int] = io.BytesIO(a__ )
elif font_path is not None:
UpperCAmelCase_: Union[str, Any] = font_path
else:
UpperCAmelCase_: Union[str, Any] = hf_hub_download(a__ , """Arial.TTF""" )
UpperCAmelCase_: Optional[int] = ImageFont.truetype(a__ , encoding="""UTF-8""" , size=a__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
UpperCAmelCase_: Tuple = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , a__ ) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Optional[int] = temp_draw.textbbox((0, 0) , a__ , a__ )
# Create the actual image with a bit of padding around the text.
UpperCAmelCase_: Optional[Any] = text_width + left_padding + right_padding
UpperCAmelCase_: Dict = text_height + top_padding + bottom_padding
UpperCAmelCase_: Optional[int] = Image.new("""RGB""" , (image_width, image_height) , a__ )
UpperCAmelCase_: Union[str, Any] = ImageDraw.Draw(a__ )
draw.text(xy=(left_padding, top_padding) , text=a__ , fill=a__ , font=a__ )
return image
def lowerCAmelCase_ (lowerCAmelCase__: np.ndarray , lowerCAmelCase__: str , **lowerCAmelCase__: Optional[Any] ):
"""simple docstring"""
requires_backends(a__ , """vision""" )
# Convert to PIL image if necessary
UpperCAmelCase_: Optional[int] = to_pil_image(a__ )
UpperCAmelCase_: Optional[int] = render_text(a__ , **a__ )
UpperCAmelCase_: int = max(header_image.width , image.width )
UpperCAmelCase_: Dict = int(image.height * (new_width / image.width) )
UpperCAmelCase_: int = int(header_image.height * (new_width / header_image.width) )
UpperCAmelCase_: int = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
UpperCAmelCase_: int = to_numpy_array(a__ )
if infer_channel_dimension_format(a__ ) == ChannelDimension.LAST:
UpperCAmelCase_: int = to_channel_dimension_format(a__ , ChannelDimension.LAST )
return new_image
class _a ( _lowercase ):
A = ['''flattened_patches''']
def __init__(self, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = 2048, SCREAMING_SNAKE_CASE_ = False, **SCREAMING_SNAKE_CASE_, ) -> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase_: Any = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
UpperCAmelCase_: str = do_normalize
UpperCAmelCase_: List[Any] = do_convert_rgb
UpperCAmelCase_: Dict = max_patches
UpperCAmelCase_: Union[str, Any] = is_vqa
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> np.ndarray:
requires_backends(self.extract_flattened_patches, """torch""" )
_check_torch_version()
# convert to torch
UpperCAmelCase_: Optional[Any] = to_channel_dimension_format(__UpperCamelCase, ChannelDimension.FIRST )
UpperCAmelCase_: Optional[int] = torch.from_numpy(__UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_: Optional[Any] = patch_size["""height"""], patch_size["""width"""]
UpperCAmelCase_ , UpperCAmelCase_: Any = get_image_size(__UpperCamelCase )
        # maximize scale s.t. the resulting rows * columns stays within max_patches
UpperCAmelCase_: Any = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCAmelCase_: str = max(min(math.floor(scale * image_height / patch_height ), __UpperCamelCase ), 1 )
UpperCAmelCase_: Any = max(min(math.floor(scale * image_width / patch_width ), __UpperCamelCase ), 1 )
UpperCAmelCase_: str = max(num_feasible_rows * patch_height, 1 )
UpperCAmelCase_: Optional[Any] = max(num_feasible_cols * patch_width, 1 )
UpperCAmelCase_: Union[str, Any] = torch.nn.functional.interpolate(
image.unsqueeze(0 ), size=(resized_height, resized_width), mode="""bilinear""", align_corners=__UpperCamelCase, antialias=__UpperCamelCase, ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCAmelCase_: List[Any] = torch_extract_patches(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
UpperCAmelCase_: Optional[Any] = patches.shape
UpperCAmelCase_: Union[str, Any] = patches_shape[1]
UpperCAmelCase_: Tuple = patches_shape[2]
UpperCAmelCase_: str = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCAmelCase_: int = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCAmelCase_: Tuple = torch.arange(__UpperCamelCase ).reshape([rows, 1] ).repeat(1, __UpperCamelCase ).reshape([rows * columns, 1] )
UpperCAmelCase_: str = torch.arange(__UpperCamelCase ).reshape([1, columns] ).repeat(__UpperCamelCase, 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
UpperCAmelCase_: int = row_ids.to(torch.floataa )
UpperCAmelCase_: List[Any] = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCAmelCase_: List[Any] = torch.cat([row_ids, col_ids, patches], -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCAmelCase_: Dict = torch.nn.functional.pad(__UpperCamelCase, [0, 0, 0, max_patches - (rows * columns)] ).float()
UpperCAmelCase_: Optional[Any] = to_numpy_array(__UpperCamelCase )
return result
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_ ) -> np.ndarray:
if image.dtype == np.uinta:
UpperCAmelCase_: Union[str, Any] = image.astype(np.floataa )
# take mean across the whole `image`
UpperCAmelCase_: int = np.mean(__UpperCamelCase )
UpperCAmelCase_: Dict = np.std(__UpperCamelCase )
UpperCAmelCase_: List[Any] = max(__UpperCamelCase, 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(__UpperCamelCase, mean=__UpperCamelCase, std=__UpperCamelCase, **__UpperCamelCase )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST, **SCREAMING_SNAKE_CASE_, ) -> ImageInput:
UpperCAmelCase_: int = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_: Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_: Union[str, Any] = patch_size if patch_size is not None else self.patch_size
UpperCAmelCase_: List[str] = max_patches if max_patches is not None else self.max_patches
UpperCAmelCase_: List[str] = self.is_vqa
if kwargs.get("""data_format""", __UpperCamelCase ) is not None:
raise ValueError("""data_format is not an accepted input as the outputs are """ )
UpperCAmelCase_: Any = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_: Dict = [convert_to_rgb(__UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_: int = [to_numpy_array(__UpperCamelCase ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("""A header text must be provided for VQA models.""" )
UpperCAmelCase_: str = kwargs.pop("""font_bytes""", __UpperCamelCase )
UpperCAmelCase_: str = kwargs.pop("""font_path""", __UpperCamelCase )
if isinstance(__UpperCamelCase, __UpperCamelCase ):
UpperCAmelCase_: Optional[Any] = [header_text] * len(__UpperCamelCase )
UpperCAmelCase_: int = [
render_header(__UpperCamelCase, header_text[i], font_bytes=__UpperCamelCase, font_path=__UpperCamelCase )
for i, image in enumerate(__UpperCamelCase )
]
if do_normalize:
UpperCAmelCase_: List[str] = [self.normalize(image=__UpperCamelCase ) for image in images]
# convert to torch tensor and permute
UpperCAmelCase_: Optional[Any] = [
self.extract_flattened_patches(image=__UpperCamelCase, max_patches=__UpperCamelCase, patch_size=__UpperCamelCase )
for image in images
]
# create attention mask in numpy
UpperCAmelCase_: Optional[int] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
UpperCAmelCase_: List[Any] = BatchFeature(
data={"""flattened_patches""": images, """attention_mask""": attention_masks}, tensor_type=__UpperCamelCase )
return encoded_outputs
| 360 |
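The patch extraction above is built on torch.nn.functional.unfold; a minimal sketch with illustrative sizes shows the resulting layout:

import torch

image = torch.arange(1 * 3 * 4 * 4, dtype=torch.float32).reshape(1, 3, 4, 4)
patches = torch.nn.functional.unfold(image, (2, 2), stride=(2, 2))
# Non-overlapping 2x2 blocks: (batch, channels * 2 * 2, num_patches)
assert patches.shape == (1, 12, 4)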
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : Optional[Any] = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class _a ( _lowerCAmelCase ):
A = '''encodec'''
def __init__(self, SCREAMING_SNAKE_CASE_=[1.5, 3.0, 6.0, 1_2.0, 2_4.0], SCREAMING_SNAKE_CASE_=24000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=128, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=[8, 5, 4, 2], SCREAMING_SNAKE_CASE_="weight_norm", SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="reflect", SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=1.0, SCREAMING_SNAKE_CASE_=1024, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]:
UpperCAmelCase_: List[Any] = target_bandwidths
UpperCAmelCase_: str = sampling_rate
UpperCAmelCase_: Any = audio_channels
UpperCAmelCase_: List[str] = normalize
UpperCAmelCase_: List[Any] = chunk_length_s
UpperCAmelCase_: List[Any] = overlap
UpperCAmelCase_: Any = hidden_size
UpperCAmelCase_: str = num_filters
UpperCAmelCase_: Any = num_residual_layers
UpperCAmelCase_: int = upsampling_ratios
UpperCAmelCase_: Tuple = norm_type
UpperCAmelCase_: Union[str, Any] = kernel_size
UpperCAmelCase_: str = last_kernel_size
UpperCAmelCase_: Union[str, Any] = residual_kernel_size
UpperCAmelCase_: str = dilation_growth_rate
UpperCAmelCase_: int = use_causal_conv
UpperCAmelCase_: int = pad_mode
UpperCAmelCase_: List[Any] = compress
UpperCAmelCase_: Dict = num_lstm_layers
UpperCAmelCase_: List[Any] = trim_right_ratio
UpperCAmelCase_: List[Any] = codebook_size
UpperCAmelCase_: List[Any] = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase_: Optional[Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )
super().__init__(**SCREAMING_SNAKE_CASE_ )
@property
def __snake_case (self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __snake_case (self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
@property
def __snake_case (self ) -> int:
UpperCAmelCase_: Optional[int] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def __snake_case (self ) -> int:
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 82 | 0 |
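A worked example of the derived properties above, assuming the 24 kHz defaults (sampling_rate=24000, upsampling_ratios=[8, 5, 4, 2]):

import math

sampling_rate = 24000
hop_length = 8 * 5 * 4 * 2                           # product of upsampling ratios = 320
frame_rate = math.ceil(sampling_rate / hop_length)   # 24000 / 320 = 75 frames per second
assert frame_rate == 75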
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =tempfile.mkdtemp()
# fmt: off
__lowercase =['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__lowercase =dict(zip(_lowerCamelCase , range(len(_lowerCamelCase))))
__lowercase =['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__lowercase ={'''unk_token''': '''<unk>'''}
__lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
__lowercase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(_lowerCamelCase) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(_lowerCamelCase))
__lowercase ={
'''do_resize''': True,
'''size''': 2_0,
'''do_center_crop''': True,
'''crop_size''': 1_8,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__lowercase =os.path.join(self.tmpdirname , _lowerCamelCase)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(_lowerCamelCase , _lowerCamelCase)
def __lowerCamelCase ( self : str , **_lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_lowerCamelCase)
def __lowerCamelCase ( self : Optional[Any] , **_lowerCAmelCase : Any):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_lowerCamelCase)
def __lowerCamelCase ( self : Any , **_lowerCAmelCase : Optional[int]):
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase)
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =[np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta)]
__lowercase =[Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1)) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =self.get_tokenizer()
__lowercase =self.get_rust_tokenizer()
__lowercase =self.get_image_processor()
__lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase)
processor_slow.save_pretrained(self.tmpdirname)
__lowercase =OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase)
__lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase)
processor_fast.save_pretrained(self.tmpdirname)
__lowercase =OwlViTProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase)
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase)
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__lowercase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__lowercase =self.get_image_processor(do_normalize=_lowerCamelCase)
__lowercase =OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowerCamelCase)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _lowerCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowerCamelCase)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase)
__lowercase =self.prepare_image_inputs()
__lowercase =image_processor(_lowerCamelCase , return_tensors='np')
__lowercase =processor(images=_lowerCamelCase , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase)
__lowercase ='''lower newer'''
__lowercase =processor(text=_lowerCamelCase , return_tensors='np')
__lowercase =tokenizer(_lowerCamelCase , return_tensors='np')
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist())
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase)
__lowercase ='''lower newer'''
__lowercase =self.prepare_image_inputs()
__lowercase =processor(text=_lowerCamelCase , images=_lowerCamelCase)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase):
processor()
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase ='''google/owlvit-base-patch32'''
__lowercase =OwlViTProcessor.from_pretrained(_lowerCamelCase)
__lowercase =['''cat''', '''nasa badge''']
__lowercase =processor(text=_lowerCamelCase)
__lowercase =1_6
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask'])
self.assertEqual(inputs['input_ids'].shape , (2, seq_length))
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase):
processor()
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase ='''google/owlvit-base-patch32'''
__lowercase =OwlViTProcessor.from_pretrained(_lowerCamelCase)
__lowercase =[['''cat''', '''nasa badge'''], ['''person''']]
__lowercase =processor(text=_lowerCamelCase)
__lowercase =1_6
__lowercase =len(_lowerCamelCase)
        __lowercase =max([len(texts) for texts in input_texts])
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask'])
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length))
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase):
processor()
def __lowerCamelCase ( self : str):
'''simple docstring'''
__lowercase ='''google/owlvit-base-patch32'''
__lowercase =OwlViTProcessor.from_pretrained(_lowerCamelCase)
__lowercase =['''cat''', '''nasa badge''']
__lowercase =processor(text=_lowerCamelCase)
__lowercase =1_6
__lowercase =inputs['''input_ids''']
__lowercase =[
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask'])
self.assertEqual(inputs['input_ids'].shape , (2, seq_length))
self.assertListEqual(list(input_ids[0]) , predicted_ids[0])
self.assertListEqual(list(input_ids[1]) , predicted_ids[1])
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase)
__lowercase =self.prepare_image_inputs()
__lowercase =self.prepare_image_inputs()
__lowercase =processor(images=_lowerCamelCase , query_images=_lowerCamelCase)
self.assertListEqual(list(inputs.keys()) , ['query_pixel_values', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase):
processor()
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =self.get_image_processor()
__lowercase =self.get_tokenizer()
__lowercase =OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase)
__lowercase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase =processor.batch_decode(_lowerCamelCase)
__lowercase =tokenizer.batch_decode(_lowerCamelCase)
self.assertListEqual(_lowerCamelCase , _lowerCamelCase)
| 166 |
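The batched-text padding checked above can be sketched in pure Python (the queries are illustrative): each image's query list is padded to the longest one, giving batch_size * num_max_text_queries rows.

input_texts = [["cat", "nasa badge"], ["person"]]
max_queries = max(len(texts) for texts in input_texts)
flattened = [
    query
    for texts in input_texts
    for query in texts + [""] * (max_queries - len(texts))
]
assert len(flattened) == len(input_texts) * max_queries  # 2 * 2 = 4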
from abc import ABC, abstractmethod
from typing import List, Optional
class a_ ( a__ ):
"""simple docstring"""
def __init__( self ) ->List[str]:
        # sanity-check that the subclass implements the constraint interface correctly
self.test()
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : List[Any] = self.advance()
if not self.does_advance(_lowerCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.update(_lowerCamelCase )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[int]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Union[str, Any]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Any:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->int:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
SCREAMING_SNAKE_CASE : Optional[Any] = token_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.token_ids )
SCREAMING_SNAKE_CASE : Any = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : Any = False
def __lowerCAmelCase ( self ) ->List[Any]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : str = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : Union[str, Any] = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
def __lowerCAmelCase ( self ) ->Any:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Dict:
SCREAMING_SNAKE_CASE : Any = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Dict = self.seqlen
SCREAMING_SNAKE_CASE : int = self.fulfilled_idx
SCREAMING_SNAKE_CASE : Tuple = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Dict:
SCREAMING_SNAKE_CASE : Any = max([len(_lowerCamelCase ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : List[str] = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : Optional[Any] = root
for tidx, token_id in enumerate(_lowerCamelCase ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : Tuple = level[token_id]
if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = root
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
SCREAMING_SNAKE_CASE : List[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : int = start[current_token]
SCREAMING_SNAKE_CASE : Optional[int] = list(start.keys() )
return next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Any = self.next_tokens(_lowerCamelCase )
return len(_lowerCamelCase ) == 0
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = list(root.values() )
if len(_lowerCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : List[str] = self.count_leaves(_lowerCamelCase )
return len(_lowerCamelCase ) != leaf_count
class a_ ( a__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->str:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
            raise ValueError(
                F"""Each list in `nested_token_ids` has to be a list of non-negative integers, but is {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveTrie(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = nested_token_ids
SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = False
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : str = self.trie.next_tokens(self.current_seq )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Any:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
if self.does_advance(_lowerCamelCase ):
self.current_seq.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE : Dict = True
self.reset()
SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : List[Any] = completed
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : List[Any] = []
def __lowerCAmelCase ( self ) ->Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[str]:
SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : int = self.current_seq
SCREAMING_SNAKE_CASE : Optional[int] = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : List[Any] = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : str = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : List[str] = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = False
self.init_state()
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Tuple = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints]
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : Optional[int] = constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : List[str] = self.inprogress_constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Union[str, Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.add(_lowerCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[int] = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.inprogress_constraint.update(_lowerCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Optional[int] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : str = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : Optional[Any] = True
else:
            # Not in the middle of fulfilling a constraint, so does this `token_id` help us step towards any of our
            # constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowerCamelCase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = pending_constraint.update(_lowerCamelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : str = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Optional[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : str = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->str:
        SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints ) # we never actually modify the self.constraints
        # objects throughout this process, so they stay in their initialization state.
if stateful:
SCREAMING_SNAKE_CASE : str = [
constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.copy(stateful=_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
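# Illustrative usage sketch (added; not part of the original sample). The classes above mirror
# `transformers.generation.beam_constraints`, so under that upstream module layout the state
# machine can be driven by hand:
from transformers.generation.beam_constraints import ConstraintListState, PhrasalConstraint

constraint = PhrasalConstraint([5, 6, 7] )  # force the token sequence 5, 6, 7
state = ConstraintListState([constraint] )
state.reset([5] )                           # pretend token 5 was already generated
print(state.advance() )                     # [6] -- the next forced token
print(state.add(6 ) )                       # (False, True): stepped, not yet complete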
| 313 | 0 |
'''simple docstring'''
from __future__ import annotations
def _lowercase ( number_of_bytes ,partitions ):
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""" )
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not be greater than number_of_bytes!""" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}" )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
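# Worked example (added for illustration): the last partition absorbs any remainder bytes.
#
#     >>> _lowercase(100, 4)
#     ['1-25', '26-50', '51-75', '76-100']
#     >>> _lowercase(10, 3)
#     ['1-3', '4-6', '7-10']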
| 359 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _lowercase ( __A ):
'''simple docstring'''
    __UpperCamelCase = filter(lambda __A : __A.requires_grad ,model.parameters() )
__UpperCamelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
a__ : Optional[Any] = logging.getLogger(__name__)
def _lowercase ( __A ,__A ):
'''simple docstring'''
if metric == "rouge2":
__UpperCamelCase = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
__UpperCamelCase = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
__UpperCamelCase = """{val_avg_em:.4f}-{step_count}"""
else:
raise NotImplementedError(
f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
""" function.""" )
__UpperCamelCase = ModelCheckpoint(
dirpath=__A ,filename=__A ,monitor=f"val_{metric}" ,mode="""max""" ,save_top_k=3 ,every_n_epochs=1 ,)
return checkpoint_callback
def _lowercase ( __A ,__A ):
'''simple docstring'''
return EarlyStopping(
monitor=f"val_{metric}" ,mode="""min""" if """loss""" in metric else """max""" ,patience=__A ,verbose=__A ,)
class UpperCAmelCase__ ( pl.Callback):
def __lowerCamelCase ( self , lowercase , lowercase ) -> Dict:
__UpperCamelCase = {f"lr_group_{i}": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowercase )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase=True ) -> None:
logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" )
__UpperCamelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
__UpperCamelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
__UpperCamelCase = od / """test_results.txt"""
__UpperCamelCase = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__UpperCamelCase = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
__UpperCamelCase = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=lowercase )
generations_file.parent.mkdir(exist_ok=lowercase )
with open(lowercase , """a+""" ) as writer:
for key in sorted(lowercase ):
if key in ["log", "progress_bar", "preds"]:
continue
__UpperCamelCase = metrics[key]
if isinstance(lowercase , torch.Tensor ):
__UpperCamelCase = val.item()
__UpperCamelCase = f"{key}: {val:.6f}\n"
writer.write(lowercase )
if not save_generations:
return
if "preds" in metrics:
__UpperCamelCase = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(lowercase )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> str:
try:
__UpperCamelCase = pl_module.model.model.num_parameters()
except AttributeError:
__UpperCamelCase = pl_module.model.num_parameters()
__UpperCamelCase = count_trainable_parameters(lowercase )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> Optional[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowercase , lowercase , """test""" )
@rank_zero_only
def __lowerCamelCase ( self , lowercase , lowercase ) -> int:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 243 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
__UpperCAmelCase : List[str] = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = "Hello, World!"
__UpperCAmelCase : Any = "en_XX"
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Optional[int]:
__snake_case: Optional[Any] = Path("""data_bin""")
__snake_case: Tuple = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(SCREAMING_SNAKE_CASE__).parent) , checkpoint_file=Path(SCREAMING_SNAKE_CASE__).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(SCREAMING_SNAKE_CASE__) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(SCREAMING_SNAKE_CASE__).parent / """sentencepiece.bpe.model""") , src_dict=str(data_dir / """dict.txt""") , )
xmod.eval() # disable dropout
print(SCREAMING_SNAKE_CASE__)
__snake_case: int = xmod.model.encoder.sentence_encoder
__snake_case: Optional[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__snake_case: Optional[Any] = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = XmodForSequenceClassification(SCREAMING_SNAKE_CASE__) if classification_head else XmodForMaskedLM(SCREAMING_SNAKE_CASE__)
model.eval()
# Now let's copy all the weights.
# Embeddings
__snake_case: int = xmod_sent_encoder.embed_tokens.weight
__snake_case: Optional[Any] = xmod_sent_encoder.embed_positions.weight
__snake_case: str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c xmod doesn't use them.
__snake_case: Optional[int] = xmod_sent_encoder.layernorm_embedding.weight
__snake_case: List[Any] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
__snake_case: int = model.roberta.encoder.layer[i]
__snake_case: Tuple = xmod_sent_encoder.layers[i]
# self attention
__snake_case: Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size))
):
raise AssertionError("""Dimensions of self-attention weights do not match.""")
__snake_case: int = xmod_layer.self_attn.q_proj.weight
__snake_case: Tuple = xmod_layer.self_attn.q_proj.bias
__snake_case: List[str] = xmod_layer.self_attn.k_proj.weight
__snake_case: Any = xmod_layer.self_attn.k_proj.bias
__snake_case: Dict = xmod_layer.self_attn.v_proj.weight
__snake_case: Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
__snake_case: Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""")
__snake_case: Dict = xmod_layer.self_attn.out_proj.weight
__snake_case: List[Any] = xmod_layer.self_attn.out_proj.bias
__snake_case: Optional[Any] = xmod_layer.self_attn_layer_norm.weight
__snake_case: int = xmod_layer.self_attn_layer_norm.bias
# intermediate
__snake_case: List[str] = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("""Dimensions of intermediate weights do not match.""")
        __snake_case: Optional[int] = xmod_layer.fc1.weight
        __snake_case: List[Any] = xmod_layer.fc1.bias
# output
__snake_case: Tuple = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("""Dimensions of feed-forward weights do not match.""")
        __snake_case: Optional[Any] = xmod_layer.fc2.weight
        __snake_case: Any = xmod_layer.fc2.bias
__snake_case: str = xmod_layer.final_layer_norm.weight
__snake_case: Optional[int] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__snake_case: List[str] = xmod_layer.adapter_layer_norm.weight
__snake_case: Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
raise AssertionError("""Lists of language adapters do not match.""")
for lang_code, adapter in xmod_layer.adapter_modules.items():
__snake_case: Tuple = bert_output.adapter_modules[lang_code]
__snake_case: Optional[Any] = xmod_layer.adapter_modules[lang_code]
            __snake_case: Optional[int] = from_adapter.fc1.weight
            __snake_case: int = from_adapter.fc1.bias
            __snake_case: List[str] = from_adapter.fc2.weight
            __snake_case: Optional[Any] = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__snake_case: Any = xmod_sent_encoder.layer_norm.weight
__snake_case: Optional[int] = xmod_sent_encoder.layer_norm.bias
if classification_head:
__snake_case: Optional[int] = xmod.model.classification_heads["""mnli"""].dense.weight
__snake_case: int = xmod.model.classification_heads["""mnli"""].dense.bias
__snake_case: Tuple = xmod.model.classification_heads["""mnli"""].out_proj.weight
__snake_case: int = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
__snake_case: str = xmod.model.encoder.lm_head.dense.weight
__snake_case: str = xmod.model.encoder.lm_head.dense.bias
__snake_case: List[Any] = xmod.model.encoder.lm_head.layer_norm.weight
__snake_case: List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
__snake_case: List[Any] = xmod.model.encoder.lm_head.weight
__snake_case: Union[str, Any] = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__snake_case: List[Any] = xmod.encode(SCREAMING_SNAKE_CASE__).unsqueeze(0) # batch of size 1
model.roberta.set_default_language(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = model(SCREAMING_SNAKE_CASE__)[0]
if classification_head:
__snake_case: List[Any] = xmod.model.classification_heads["""mnli"""](xmod.extract_features(SCREAMING_SNAKE_CASE__))
else:
__snake_case: int = xmod.model(SCREAMING_SNAKE_CASE__ , lang_id=[SAMPLE_LANGUAGE])[0]
print(our_output.shape , their_output.shape)
__snake_case: List[str] = torch.max(torch.abs(our_output - their_output)).item()
print(F'''max_absolute_diff = {max_absolute_diff}''') # ~ 1e-7
__snake_case: Any = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3)
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""")
if not success:
raise Exception("""Something went wRoNg""")
Path(SCREAMING_SNAKE_CASE__).mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__)
print(F'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
__UpperCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
__UpperCAmelCase : Tuple = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
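# Example invocation (illustrative; paths are placeholders and the script filename is assumed to
# follow the usual transformers conversion-script naming):
#
#     python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#         --xmod_checkpoint_path ./checkpoints/model.pt \
#         --pytorch_dump_folder_path ./xmod-base \
#         --classification_head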
| 111 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
__UpperCAmelCase : Optional[int] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """ibert"""
def __init__( self : Dict , A : Union[str, Any]=30_522 , A : List[Any]=768 , A : List[Any]=12 , A : Optional[int]=12 , A : Optional[Any]=3_072 , A : int="gelu" , A : str=0.1 , A : List[Any]=0.1 , A : Optional[Any]=512 , A : int=2 , A : Union[str, Any]=0.02 , A : List[str]=1E-12 , A : Optional[int]=1 , A : Optional[int]=0 , A : List[str]=2 , A : str="absolute" , A : Any=False , A : Optional[Any]="none" , **A : Any , ):
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
__snake_case: List[Any] = vocab_size
__snake_case: Optional[Any] = hidden_size
__snake_case: List[str] = num_hidden_layers
__snake_case: Tuple = num_attention_heads
__snake_case: List[str] = hidden_act
__snake_case: Optional[Any] = intermediate_size
__snake_case: Tuple = hidden_dropout_prob
__snake_case: List[str] = attention_probs_dropout_prob
__snake_case: Any = max_position_embeddings
__snake_case: int = type_vocab_size
__snake_case: List[str] = initializer_range
__snake_case: List[Any] = layer_norm_eps
__snake_case: Optional[int] = position_embedding_type
__snake_case: str = quant_mode
__snake_case: Optional[int] = force_dequant
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : Tuple ):
if self.task == "multiple-choice":
__snake_case: List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__snake_case: List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
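# Minimal instantiation sketch (added for illustration): the defaults above mirror RoBERTa-base,
# and `quant_mode` switches on integer-only inference. The public transformers names are assumed:
from transformers import IBertConfig, IBertModel

ibert_config = IBertConfig(quant_mode=True)  # enable integer-only quantization
ibert_model = IBertModel(ibert_config)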
| 111 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class lowercase ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = GPTSwaTokenizer
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ = GPTSwaTokenizer(__a , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ (self , __a ) -> Any:
"""simple docstring"""
        input_text = 'This is a test'
        output_text = 'This is a test'
return input_text, output_text
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = '<s>'
UpperCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(__a ) , 2000 )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = GPTSwaTokenizer(__a )
UpperCAmelCase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(__a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [465, 287, 265, 631, 842] )
UpperCAmelCase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
__a , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(__a )
# fmt: off
self.assertListEqual(
__a , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = GPTSwaTokenizer(__a )
UpperCAmelCase__ = ['This is a test', 'I was born in 92000, and this is falsé.']
UpperCAmelCase__ = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__a , __a ):
self.assertListEqual(tokenizer.encode_fast(__a ) , __a )
# Test that decode_fast returns the input text
for text, token_ids in zip(__a , __a ):
self.assertEqual(tokenizer.decode_fast(__a ) , __a )
@slow
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
UpperCAmelCase__ = {'input_ids': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='AI-Sweden/gpt-sw3-126m' , sequences=__a , )
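# To run just this test module (illustrative command; the path assumes the standard layout of the
# transformers test tree):
#
#     python -m pytest tests/models/gpt_sw3/test_tokenization_gpt_sw3.py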
| 357 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase__ = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase__ = model_name.find('patch' )
UpperCAmelCase__ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
UpperCAmelCase__ = XCLIPVisionConfig(patch_size=snake_case__ , num_frames=snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
UpperCAmelCase__ = 12
UpperCAmelCase__ = 10_24
UpperCAmelCase__ = 40_96
UpperCAmelCase__ = 16
UpperCAmelCase__ = 24
UpperCAmelCase__ = 7_68
UpperCAmelCase__ = 30_72
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = 3_36
UpperCAmelCase__ = XCLIPConfig.from_text_vision_configs(snake_case__ , snake_case__ )
if "large" in model_name:
UpperCAmelCase__ = 7_68
return config
def UpperCamelCase_( snake_case__: Any ) -> Tuple:
# text encoder
if name == "token_embedding.weight":
UpperCAmelCase__ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
UpperCAmelCase__ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
UpperCAmelCase__ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
UpperCAmelCase__ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
UpperCAmelCase__ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
UpperCAmelCase__ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
UpperCAmelCase__ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase__ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
UpperCAmelCase__ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase__ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
UpperCAmelCase__ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
UpperCAmelCase__ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
UpperCAmelCase__ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
UpperCAmelCase__ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
UpperCAmelCase__ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
UpperCAmelCase__ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
UpperCAmelCase__ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase__ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
UpperCAmelCase__ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase__ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
UpperCAmelCase__ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
UpperCAmelCase__ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: List[Any] ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ = orig_state_dict.pop(snake_case__ )
if "attn.in_proj" in key:
UpperCAmelCase__ = key.split('.' )
if key.startswith('visual' ):
UpperCAmelCase__ = key_split[3]
UpperCAmelCase__ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[
:dim
]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase__ = val[
:dim, :
]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[
-dim:, :
]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
elif key.startswith('mit' ):
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[dim : dim * 2, :]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[dim : dim * 2]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = key_split[2]
UpperCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase__ = val[:dim, :]
UpperCAmelCase__ = val[
dim : dim * 2, :
]
UpperCAmelCase__ = val[-dim:, :]
else:
UpperCAmelCase__ = val[:dim]
UpperCAmelCase__ = val[
dim : dim * 2
]
UpperCAmelCase__ = val[-dim:]
else:
UpperCAmelCase__ = rename_key(snake_case__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase__ = val.T
UpperCAmelCase__ = val
return orig_state_dict
def UpperCamelCase_( snake_case__: Tuple ) -> Optional[Any]:
if num_frames == 8:
UpperCAmelCase__ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
UpperCAmelCase__ = 'eating_spaghetti.npy'
elif num_frames == 32:
UpperCAmelCase__ = 'eating_spaghetti_32_frames.npy'
UpperCAmelCase__ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=snake_case__ , repo_type='dataset' , )
UpperCAmelCase__ = np.load(snake_case__ )
return list(snake_case__ )
def UpperCamelCase_( snake_case__: Tuple , snake_case__: str=None , snake_case__: Union[str, Any]=False ) -> List[Any]:
UpperCAmelCase__ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
UpperCAmelCase__ = model_to_url[model_name]
UpperCAmelCase__ = 8
if "16-frames" in model_name:
UpperCAmelCase__ = 16
elif "shot" in model_name:
UpperCAmelCase__ = 32
UpperCAmelCase__ = get_xclip_config(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase__ = 'pytorch_model.bin'
gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ )
UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model']
else:
UpperCAmelCase__ = torch.hub.load_state_dict_from_url(snake_case__ )['model']
UpperCAmelCase__ = convert_state_dict(snake_case__ , snake_case__ )
UpperCAmelCase__ = XCLIPModel(snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = model.load_state_dict(snake_case__ , strict=snake_case__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase__ = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24
UpperCAmelCase__ = VideoMAEImageProcessor(size=snake_case__ )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
UpperCAmelCase__ = XCLIPProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
UpperCAmelCase__ = prepare_video(snake_case__ )
UpperCAmelCase__ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=snake_case__ , return_tensors='pt' , padding=snake_case__ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase__ = model(**snake_case__ )
# Verify outputs
UpperCAmelCase__ = outputs.logits_per_video
UpperCAmelCase__ = logits_per_video.softmax(dim=1 )
print('Probs:' , snake_case__ )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase__ = torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase__ = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase__ = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase__ = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase__ = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase__ = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase__ = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase__ = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase__ = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase__ = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] )
else:
raise ValueError(f"Model name {model_name} not supported" )
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(snake_case__ , organization='nielsr' )
processor.push_to_hub(snake_case__ , organization='nielsr' )
slow_tokenizer.push_to_hub(snake_case__ , organization='nielsr' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_UpperCamelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
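# Example invocation (illustrative; the script filename is assumed):
#
#     python convert_x_clip_original_pytorch_to_hf.py \
#         --model_name xclip-base-patch32 \
#         --pytorch_dump_folder_path ./xclip-base-patch32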
| 335 | 0 |
'''simple docstring'''
import random
from typing import Any
def _lowerCAmelCase ( data : list[Any] ) -> list[Any]:
    """simple docstring"""
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        # swap the two randomly chosen elements in place
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["""python""", """says""", """hello""", """!"""]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", _lowerCAmelCase(integers), _lowerCAmelCase(strings))
| 47 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
UpperCAmelCase_ : Optional[int] = """src/transformers"""
UpperCAmelCase_ : Tuple = """docs/source/en"""
UpperCAmelCase_ : Optional[Any] = """."""
def _A (__a , __a , __a ) -> Dict:
"""simple docstring"""
with open(__a , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ : Dict = f.readlines()
# Find the start prompt.
SCREAMING_SNAKE_CASE_ : List[Any] = 0
while not lines[start_index].startswith(__a ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ : Tuple = start_index
while not lines[end_index].startswith(__a ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
UpperCAmelCase_ : Optional[Any] = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
UpperCAmelCase_ : int = re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
UpperCAmelCase_ : Dict = re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
UpperCAmelCase_ : int = re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH)
def _A (__a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __a )
return [m.group(0 ) for m in matches]
def _A (__a , __a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = 2 if text == '''✅''' or text == '''❌''' else len(__a )
SCREAMING_SNAKE_CASE_ : Tuple = (width - text_length) // 2
SCREAMING_SNAKE_CASE_ : Tuple = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ : Tuple = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
SCREAMING_SNAKE_CASE_ : List[Any] = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ : List[str] = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ : Optional[int] = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ : int = collections.defaultdict(__a )
# Let's lookup through all transformers object (once).
for attr_name in dir(__a ):
SCREAMING_SNAKE_CASE_ : Any = None
if attr_name.endswith('''Tokenizer''' ):
SCREAMING_SNAKE_CASE_ : Dict = slow_tokenizers
SCREAMING_SNAKE_CASE_ : Dict = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = fast_tokenizers
SCREAMING_SNAKE_CASE_ : Optional[Any] = attr_name[:-13]
elif _re_tf_models.match(__a ) is not None:
SCREAMING_SNAKE_CASE_ : int = tf_models
SCREAMING_SNAKE_CASE_ : Dict = _re_tf_models.match(__a ).groups()[0]
elif _re_flax_models.match(__a ) is not None:
SCREAMING_SNAKE_CASE_ : Any = flax_models
SCREAMING_SNAKE_CASE_ : Tuple = _re_flax_models.match(__a ).groups()[0]
elif _re_pt_models.match(__a ) is not None:
SCREAMING_SNAKE_CASE_ : str = pt_models
SCREAMING_SNAKE_CASE_ : int = _re_pt_models.match(__a ).groups()[0]
if lookup_dict is not None:
while len(__a ) > 0:
if attr_name in model_name_to_prefix.values():
SCREAMING_SNAKE_CASE_ : List[str] = True
break
# Try again after removing the last word in the name
SCREAMING_SNAKE_CASE_ : Optional[int] = ''''''.join(camel_case_split(__a )[:-1] )
# Let's build that table!
SCREAMING_SNAKE_CASE_ : Any = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
SCREAMING_SNAKE_CASE_ : Any = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
SCREAMING_SNAKE_CASE_ : List[str] = [len(__a ) + 2 for c in columns]
SCREAMING_SNAKE_CASE_ : str = max([len(__a ) for name in model_names] ) + 2
# Build the table per se
SCREAMING_SNAKE_CASE_ : List[Any] = '''|''' + '''|'''.join([_center_text(__a , __a ) for c, w in zip(__a , __a )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {True: '''✅''', False: '''❌'''}
for name in model_names:
SCREAMING_SNAKE_CASE_ : str = model_name_to_prefix[name]
SCREAMING_SNAKE_CASE_ : int = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(__a , __a ) for l, w in zip(__a , __a )] ) + "|\n"
return table
def _A (__a=False ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = _find_text_in_file(
filename=os.path.join(__a , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
SCREAMING_SNAKE_CASE_ : Tuple = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(__a , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase_ : Any = parser.parse_args()
check_model_table(args.fix_and_overwrite)
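# Example invocation from the repository root (matching the header comment above):
#
#     python utils/check_table.py --fix_and_overwrite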
| 91 | 0 |
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def a__ ( __lowercase , __lowercase ) -> Dict:
_A = old_name
if "patch_embed" in old_name:
_A , _A , _A = old_name.split("." )
if layer == "0":
_A = old_name.replace("0" , "convolution1" )
elif layer == "1":
_A = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
_A = old_name.replace("3" , "convolution2" )
else:
_A = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(R"\d\.\d" , __lowercase ):
_A = R"\b\d{2}\b"
if bool(re.search(__lowercase , __lowercase ) ):
_A = re.search(R"\d\.\d\d." , __lowercase ).group()
else:
_A = re.search(R"\d\.\d." , __lowercase ).group()
if int(match[0] ) < 6:
_A = old_name.replace(__lowercase , "" )
_A = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
_A = "intermediate_stages." + trimmed_name
else:
_A = old_name.replace(__lowercase , "" )
if int(match[2] ) < num_meta4D_last_stage:
_A = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
_A = str(int(match[2] ) - num_meta4D_last_stage )
_A = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
_A = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
_A = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
_A = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
_A = trimmed_name.replace("fc2" , "linear_out" )
_A = "last_stage." + trimmed_name
elif "network" in old_name and re.search(R".\d." , __lowercase ):
_A = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
_A = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
_A = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
_A = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
_A = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
_A = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
_A = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
_A = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
_A = new_name.replace("norm" , "layernorm" )
_A = "efficientformer." + new_name
else:
_A = "efficientformer.encoder." + new_name
return new_name
def a__ ( __lowercase , __lowercase ) -> List[str]:
for key in checkpoint.copy().keys():
_A = checkpoint.pop(__lowercase )
_A = val
return checkpoint
def a__ ( ) -> Dict:
_A = "http://images.cocodataset.org/val2017/000000039769.jpg"
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return image
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
_A = torch.load(__lowercase , map_location="cpu" )["model"]
_A = EfficientFormerConfig.from_json_file(__lowercase )
_A = EfficientFormerForImageClassificationWithTeacher(__lowercase )
_A = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    _A = config.depths[-1] - config.num_meta3d_blocks + 1
_A = convert_torch_checkpoint(__lowercase , __lowercase )
model.load_state_dict(__lowercase )
model.eval()
_A = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
_A = prepare_img()
_A = 256
_A = 224
_A = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
_A = processor(images=__lowercase , return_tensors="pt" ).pixel_values
# original processing pipeline
_A = Compose(
[
Resize(__lowercase , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(__lowercase ),
ToTensor(),
Normalize(__lowercase , __lowercase ),
] )
_A = image_transforms(__lowercase ).unsqueeze(0 )
assert torch.allclose(__lowercase , __lowercase )
_A = model(__lowercase )
_A = outputs.logits
_A = (1, 1000)
if "l1" in model_name:
_A = torch.Tensor(
[-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
assert torch.allclose(logits[0, :10] , __lowercase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
_A = torch.Tensor(
[-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
assert torch.allclose(logits[0, :10] , __lowercase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
_A = torch.Tensor(
[-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(__lowercase ).mkdir(exist_ok=__lowercase )
model.save_pretrained(__lowercase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(__lowercase )
print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=__lowercase , )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=__lowercase , )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
a_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
) | 352 |
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _validate_point(point) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
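# Quick sanity checks (illustrative values, not part of the original module):
# manhattan_distance([1, 1], [9, 9]) == 16.0
# manhattan_distance_one_liner([1, 1], [2, 2]) == 2.0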
if __name__ == "__main__":
import doctest
doctest.testmod() | 163 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""allegro/herbert-base-cased""": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class A__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
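# Minimal usage sketch (assumes the "allegro/herbert-base-cased" files above are reachable):
# tok = A__.from_pretrained("allegro/herbert-base-cased")
# tok.build_inputs_with_special_tokens([5, 6])  # -> [cls_id, 5, 6, sep_id]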
| 52 |
from __future__ import annotations
import math
def is_prime( number ):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
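# The 6k +/- 1 stride works because every prime p > 3 satisfies p % 6 in {1, 5};
# e.g. is_prime(37) is True while is_prime(35) is False (5 * 7).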
def list_truncated_nums( n ):
    """simple docstring"""
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums
def validate( n ):
    """simple docstring"""
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True
def compute_truncated_primes( count = 11 ):
    """simple docstring"""
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def solution():
    """simple docstring"""
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f"{sum(compute_truncated_primes(11)) = }")
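    # For reference, the eleven truncatable primes are 23, 37, 53, 73, 313, 317, 373,
    # 797, 3137, 3797 and 739397, so the printed sum should be 748317 (Project Euler 37).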
| 82 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt( prompt_or_repo_id : str , agent_name : str , mode : str="run" )->str:
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('''\\s''' , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
    with open(prompt_file , '''r''' , encoding='''utf-8''' ) as f:
        return f.read()
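# Usage sketch (hypothetical agent name; downloading from the Hub needs network access):
# template = download_prompt(None, agent_name="my-agent", mode="chat")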
| 350 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a__: Union[str, Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__Nightly ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger '''
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt,generator=generator,guidance_scale=7.5,num_inference_steps=2,output_type='''numpy''' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt,generator=generator,guidance_scale=7.5,num_inference_steps=2,output_type='''numpy''' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
    def test_text_to_image( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''',torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger '''
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt,generator=generator,guidance_scale=7.5,num_inference_steps=50,output_type='''numpy''' ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
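    # Sketch of how to enable these nightly GPU tests locally (flag name assumed from
    # diffusers' testing utilities):
    #   RUN_NIGHTLY=1 pytest -k "versatile_diffusion_text_to_image" -s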
| 39 | 0 |
'''simple docstring'''
def partition( m : int ) -> int:
    """simple docstring"""
    memo : list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
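# Sanity checks against known partition numbers: partition(4) == 5 and partition(5) == 7.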
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 93 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class snake_case ( AbstractFileSystem ):
    root_marker = """"""
    protocol = """hf-legacy"""  # "hf://"" is reserved for hffs
    def __init__( self , repo_info = None , token = None , **kwargs , ) ->None:
        super().__init__(self , **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self) ->None:
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })
    def _open( self , path , mode = "rb" , **kwargs , ):
        if not isinstance(self.repo_info , DatasetInfo):
            raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''')
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha)
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token) , client_kwargs={"trust_env": True} , ).open()
    def info( self , path , **kwargs) ->dict:
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
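    # Usage sketch (hypothetical repo metadata; requires a populated DatasetInfo object):
    #   fs = snake_case(repo_info=dataset_info, token=None)
    #   fs.ls("")                  # top-level files and directories
    #   fs.info("data/train.csv")  # metadata for one entry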
    def ls( self , path , detail=False , **kwargs) ->list:
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out) | 243 | 0 |
from __future__ import annotations
def two_pointer( nums , target ) -> list[int]:
    '''simple docstring'''
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
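# Note: the two-pointer scan assumes `nums` is sorted in ascending order, e.g.
# two_pointer([2, 7, 11, 15], 9) -> [0, 1], matching the demo below.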
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 110 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    '''simple docstring'''
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    return image
def create_rename_keys( config ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ):
    '''simple docstring'''
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config( model_name , eos_token_id ):
    '''simple docstring'''
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    '''simple docstring'''
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
    )
    eos_token_id = tokenizer('\n' , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }
    name , model_type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...' )
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=model_type , is_eval=True , device=device )
    original_model.eval()
    print('Done!' )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('Qformer.bert' ):
            key = key.replace('Qformer.bert' , 'qformer' )
        if "attention.self" in key:
            key = key.replace('self' , 'attention' )
        if "opt_proj" in key:
            key = key.replace('opt_proj' , 'language_projection' )
        if "t5_proj" in key:
            key = key.replace('t5_proj' , 'language_projection' )
        if key.startswith('opt' ):
            key = key.replace('opt' , 'language' )
        if key.startswith('t5' ):
            key = key.replace('t5' , 'language' )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='pt' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print('First values of original logits:' , original_logits[0, :3, :3] )
    print('First values of HF logits:' , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1E-2 )
    print('Looks ok!' )
    print('Generating a caption...' )
    prompt = ''
    input_ids = tokenizer(prompt , return_tensors='pt' ).input_ids.to(device )
    original_outputs = original_model.generate({'image': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('Original generation:' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('HF generation:' , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(f'''nielsr/{model_name}''' )
        hf_model.push_to_hub(f'''nielsr/{model_name}''' )
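# Example invocation (a sketch; the script file name is an assumption and the paths
# are placeholders):
#   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub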
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 110 | 1 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
        ({'num_shards': 10, 'max_num_jobs': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards( kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected
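# Reading the cases above: _distribute_shards partitions range(num_shards) into at
# most max_num_jobs contiguous chunks, e.g. 10 shards over 3 jobs -> sizes 4/3/3.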
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def test_split_gen_kwargs( gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs( gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
| 213 |
"""simple docstring"""
def compute_ap( l ):  # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
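# For this sample graph the cut vertices are 2, 3 and 5, so those are the values printed.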
| 335 | 0 |
"""simple docstring"""
def kinetic_energy( mass : float , velocity : float ) -> float:
    '''simple docstring'''
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""" )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
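# Example: kinetic_energy(10, 10) == 500.0 (KE = 1/2 * m * v^2).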
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 320 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
    'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
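    # Note: _LazyModule defers the heavy torch/TF/Flax imports declared above until an
    # attribute is first accessed, keeping the initial import of this package cheap.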
| 320 | 1 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy( out , labels ):
    """simple docstring"""
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset( dataset_path ):
    """simple docstring"""
    with open(dataset_path , encoding='utf_8' ) as f:
        reader = csv.reader(f )
        output = []
        next(reader ) # skip the first line
        for line in tqdm(reader ):
            output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets( encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    """simple docstring"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, conta, contb, mc_label),
        ) in enumerate(dataset ):
            with_conta = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            with_contb = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_conta )] = with_conta
            input_ids[i, 1, : len(with_contb )] = with_contb
            mc_token_ids[i, 0] = len(with_conta ) - 1
            mc_token_ids[i, 1] = len(with_contb ) - 1
            lm_labels[i, 0, : len(with_conta )] = with_conta
            lm_labels[i, 1, : len(with_contb )] = with_contb
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
def A_ ( ):
"""simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name' , type=str , default='openai-gpt' , help='pretrained model name' )
    parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
    parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    parser.add_argument('--train_dataset' , type=str , default='' )
    parser.add_argument('--eval_dataset' , type=str , default='' )
    parser.add_argument('--seed' , type=int , default=42 )
    parser.add_argument('--num_train_epochs' , type=int , default=3 )
    parser.add_argument('--train_batch_size' , type=int , default=8 )
    parser.add_argument('--eval_batch_size' , type=int , default=16 )
    parser.add_argument('--adam_epsilon' , default=1e-8 , type=float , help='Epsilon for Adam optimizer.' )
    parser.add_argument('--max_grad_norm' , type=int , default=1 )
    parser.add_argument(
        '--max_steps' , default=-1 , type=int , help=(
            'If > 0: set total number of training steps to perform. Override num_train_epochs.'
        ) , )
    parser.add_argument(
        '--gradient_accumulation_steps' , type=int , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--learning_rate' , type=float , default=6.25e-5 )
    parser.add_argument('--warmup_steps' , default=0 , type=int , help='Linear warmup over warmup_steps.' )
    parser.add_argument('--lr_schedule' , type=str , default='warmup_linear' )
    parser.add_argument('--weight_decay' , type=float , default=0.01 )
    parser.add_argument('--lm_coef' , type=float , default=0.9 )
    parser.add_argument('--n_valid' , type=int , default=374 )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device , n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
SCREAMING_SNAKE_CASE_ : List[Any] = ["""_start_""", """_delimiter_""", """_classify_"""]
SCREAMING_SNAKE_CASE_ : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
model.to(UpperCamelCase__ )
# Load and encode the datasets
    def tokenize_and_encode(obj ):
        if isinstance(obj , str ):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj ) )
        elif isinstance(obj , int ):
            return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset )
    eval_dataset = load_rocstories_dataset(args.eval_dataset )
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets )
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
    input_length = min(input_length , model.config.n_positions ) # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets , input_length , max_length , *special_tokens_ids )
    train_tensor_dataset , eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader ) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader ) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters() )
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters , lr=args.learning_rate , eps=args.adam_epsilon )
        scheduler = get_linear_schedule_with_warmup(
            optimizer , num_warmup_steps=args.warmup_steps , num_training_steps=t_total )
if args.do_train:
        nb_tr_steps , tr_loss , exp_average_loss = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc='Training' )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids , mc_token_ids , lm_labels , mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model , 'module' ) else model # Only save the model itself
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir , WEIGHTS_NAME )
        output_config_file = os.path.join(args.output_dir , CONFIG_NAME )
        torch.save(model_to_save.state_dict() , output_model_file )
        model_to_save.config.to_json_file(output_config_file )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
        model.to(device )
if args.do_eval:
model.eval()
        eval_loss , eval_accuracy = 0, 0
        nb_eval_steps , nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader , desc='Evaluating' ):
            batch = tuple(t.to(device ) for t in batch )
            input_ids , mc_token_ids , lm_labels , mc_labels = batch
            with torch.no_grad():
                _ , mc_loss , _ , mc_logits = model(
                    input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu' ).numpy()
            tmp_eval_accuracy = accuracy(mc_logits , mc_labels )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
        output_eval_file = os.path.join(args.output_dir , 'eval_results.txt' )
        with open(output_eval_file , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
                logger.info(' %s = %s' , key , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
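    # Example invocation (sketch; the CSV paths are placeholders for the ROCStories files):
    #   python run_openai_gpt.py --do_train --do_eval --train_dataset train.csv \
    #       --eval_dataset val.csv --output_dir ./out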
| 253 |
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def unet( hor ):
    if hor == 128:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 32:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
    state_dict = model.state_dict()
    config = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 1_4,
"""out_channels""": 1_4,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5_5_3_6,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
    hf_value_function = UNetaDModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
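    # Note: the zip(...) mapping above relies on both state dicts enumerating their
    # parameters in the same order, which holds when the module layouts match one-to-one.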
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f:
        json.dump(config , f )
def value_function():
    config = {
"""in_channels""": 1_4,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (3_2, 6_4, 1_2_8, 2_5_6),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5_5_3_6,
"""out_channels""": 1_4,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
    model = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
    state_dict = model
    hf_value_function = UNetaDModel(**config )
    print(f'''length of state dict: {len(state_dict.keys() )}''' )
    print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function() | 163 | 0 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path ,display=False ):
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device ,conf_path=None ,ckpt_path=None ):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path ,display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path ,map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd ,strict=False )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x ,model ):
    z , _ , _ = model.encode(x )
    print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string ,reload=False ):
    module , cls = string.rsplit("." ,1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module ,package=None ) ,cls )
def instantiate_from_config( config ):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate." )
    return get_obj_from_str(config["target"] )(**config.get("params" ,{} ) )
def load_model_from_config( config ,sd ,gpu=True ,eval_mode=True ):
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config ,ckpt ,gpu ,eval_mode ):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt ,map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(F"""loaded model from global step {global_step}.""" )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model ,pl_sd["state_dict"] ,gpu=gpu ,eval_mode=eval_mode )["model"]
    return model, global_step
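# Usage sketch (paths are the defaults above; requires the taming-transformers package):
#   model = load_vqgan(device="cpu")
#   xrec = reconstruct_with_vqgan(x, model)   # x: a (B, 3, H, W) image tensor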
| 253 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCamelCase_ (unittest.TestCase ):
    def test_accelerated_optimizer_pickling( self ) -> None:
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(f"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
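        # Pickling matters here because accelerate wraps the optimizer in a proxy object,
        # and that wrapper should survive a dumps/loads round trip.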
| 253 | 1 |
def solution( n : int = 600851475143 ) -> int:
    """simple docstring"""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""" )
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""" )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
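# solution() answers Project Euler problem 3: the largest prime factor of
# 600851475143 is 6857.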
if __name__ == "__main__":
print(F"""{solution() = }""")
| 219 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class __lowerCamelCase :
"""simple docstring"""
    def __init__( self , *, # begin keyword-only arguments
                 bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        """simple docstring"""
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
def __eq__( self , UpperCAmelCase ):
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self , UpperCAmelCase ):
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ):
"""simple docstring"""
return len(self.symbols )
def __contains__( self , UpperCAmelCase ):
"""simple docstring"""
return sym in self.indices
@classmethod
    def load( cls , f ):
        """simple docstring"""
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol( self , word , n=1 , overwrite=False ):
        """simple docstring"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta( self , lines ):
        """simple docstring"""
        return 0
    def add_from_file( self , f ):
        """simple docstring"""
        if isinstance(f , str ):
            try:
                with open(f , 'r' , encoding='utf-8' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f ) )
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line , field = line.rstrip().rsplit(' ' , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line , field = line.rsplit(' ' , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        'Duplicate word found when loading Dictionary: \'{}\'. '
                        'Duplicate words can overwrite earlier ones by adding the '
                        '#fairseq:overwrite flag at the end of the corresponding row '
                        'in the dictionary file. If using the Camembert model, please '
                        'download an updated copy of the model file.'.format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def rewrite_dict_keys( d )-> dict:
    """simple docstring"""
    da = dict((re.sub(R'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[F"""{k}</w>"""]
        da[k] = d[k] # restore
    return da
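# rewrite_dict_keys converts fairseq BPE conventions ("word@@" continuation markers)
# to HF conventions ("word</w>" end-of-word markers), e.g. {"hel@@": 5, "lo": 6}
# becomes {"hel": 5, "lo</w>": 6}.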
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path , pytorch_dump_folder_path )-> None:
    """simple docstring"""
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , 'checkpoint.pt' )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
    chkpt = torch.load(checkpoint_file , map_location='cpu' )
    args = chkpt['cfg']['model']
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , 'dict.txt' )
    if not os.path.isfile(dict_file ):
        raise ValueError(F"""path to the file {dict_file} does not exist!""" )
    src_dict = Dictionary.load(dict_file )
_UpperCAmelCase = rewrite_dict_keys(src_dict.indices )
_UpperCAmelCase = len(__lowerCAmelCase )
_UpperCAmelCase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['vocab_file'] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# merges_file (bpecodes)
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'bpecodes' )
if not os.path.isfile(__lowerCAmelCase ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
_UpperCAmelCase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(__lowerCAmelCase , __lowerCAmelCase )
# model config
_UpperCAmelCase = os.path.join(__lowerCAmelCase , 'config.json' )
_UpperCAmelCase = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1E-12,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# tokenizer config
_UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 1_024,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) )
# model
_UpperCAmelCase = chkpt['model']
# remove unneeded keys
_UpperCAmelCase = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase = list(model_state_dict.keys() )
    for layer_name in layer_names:
        # fairseq's 'decoder.output_projection.weight' becomes the top-level LM head;
        # every other 'decoder.*' key moves under the 'biogpt.*' namespace
        if layer_name.endswith('output_projection.weight' ):
            model_state_dict[layer_name.replace('decoder.' , '' )] = model_state_dict.pop(layer_name )
        else:
            model_state_dict[layer_name.replace('decoder' , 'biogpt' )] = model_state_dict.pop(layer_name )
_UpperCAmelCase = BioGptConfig.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase = BioGptForCausalLM(__lowerCAmelCase )
# check that it loads ok
model_new.load_state_dict(__lowerCAmelCase )
# save
_UpperCAmelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
print('Conversion is done!' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_a = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
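# Usage sketch (hypothetical paths and script name; the dump directory is expected
# to contain checkpoint.pt, dict.txt and bpecodes, as the converter above requires):
#
#   python convert_biogpt_checkpoint.py \
#       --biogpt_checkpoint_path /path/to/biogpt_dump \
#       --pytorch_dump_folder_path /path/to/converted
#
# The converted folder can then be loaded through the regular classes:
#
#   from transformers import BioGptForCausalLM, BioGptTokenizer
#   model = BioGptForCausalLM.from_pretrained('/path/to/converted')
#   tokenizer = BioGptTokenizer.from_pretrained('/path/to/converted')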
| 39 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
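# A minimal sketch of the lazy-import pattern used above (illustrative; LazyNS and
# its mapping are hypothetical names, not part of this module): attribute access
# triggers the real submodule import, so importing the package itself stays cheap
# even when the torch/vision backends are heavy.
import importlib
import types
class LazyNS(types.ModuleType):
    def __init__(self, name, package, attr_to_module):
        super().__init__(name)
        self._package = package
        self._attr_to_module = attr_to_module  # e.g. {'VivitConfig': '.configuration_vivit'}
    def __getattr__(self, attr):
        try:
            module_name = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        value = getattr(importlib.import_module(module_name, self._package), attr)
        setattr(self, attr, value)  # cache the result so __getattr__ only fires on misses
        return value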
| 117 |
import pprint
import requests
_A = 'https://zenquotes.io/api'
def _UpperCAmelCase ( ):
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def _UpperCAmelCase ( ):
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
_A = random_quotes()
pprint.pprint(response)
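# A slightly hardened variant (an illustrative sketch, not part of the script above):
# add a timeout and raise on HTTP errors instead of pretty-printing an error payload.
#
#   def get_random_quote(timeout: float = 10.0) -> list:
#       quote_response = requests.get(API_ENDPOINT_URL + '/random', timeout=timeout)
#       quote_response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx
#       return quote_response.json()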
| 117 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def constraint_to_multiple_of(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=None ):
lowercase__ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowercase__ = math.floor(val / multiple ) * multiple
if x < min_val:
lowercase__ = math.ceil(val / multiple ) * multiple
return x
lowercase__ = (output_size, output_size) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else output_size
lowercase__ , lowercase__ = get_image_size(SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = output_size
# determine new height and width
lowercase__ = output_height / input_height
lowercase__ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowercase__ = scale_width
else:
# fit height
lowercase__ = scale_height
lowercase__ = constraint_to_multiple_of(scale_height * input_height , multiple=SCREAMING_SNAKE_CASE )
lowercase__ = constraint_to_multiple_of(scale_width * input_width , multiple=SCREAMING_SNAKE_CASE )
return (new_height, new_width)
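# Worked example (an illustrative sketch of the function above, referred to by its
# call-site name get_resize_output_image_size): a 480x640 image resized toward
# 384x384 with keep_aspect_ratio=True and multiple=32 fits the height, because the
# height scale 384/480 = 0.8 deviates less from 1 than the width scale 384/640 = 0.6.
# The result is 384x512: the 3:4 aspect ratio is preserved and both sides are
# multiples of 32.
#
#   image = np.zeros((3, 480, 640))  # channels-first dummy image
#   get_resize_output_image_size(image, output_size=(384, 384), keep_aspect_ratio=True, multiple=32)
#   # -> (384, 512)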
class _a ( UpperCamelCase__ ):
_lowercase : List[Any] = ['''pixel_values''']
def __init__( self: Optional[Any] , UpperCamelCase_: bool = True , UpperCamelCase_: Dict[str, int] = None , UpperCamelCase_: PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_: bool = False , UpperCamelCase_: int = 1 , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 255 , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , **UpperCamelCase_: List[Any] , ) -> None:
"""simple docstring"""
super().__init__(**UpperCamelCase_ )
lowercase__ = size if size is not None else {'''height''': 384, '''width''': 384}
lowercase__ = get_size_dict(UpperCamelCase_ )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = keep_aspect_ratio
lowercase__ = ensure_multiple_of
lowercase__ = resample
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: np.ndarray , UpperCamelCase_: Dict[str, int] , UpperCamelCase_: bool = False , UpperCamelCase_: int = 1 , UpperCamelCase_: PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Dict , ) -> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase__ = get_resize_output_image_size(
UpperCamelCase_ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase_ , multiple=UpperCamelCase_ , )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[int, float] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Optional[Any] , ) -> Tuple:
"""simple docstring"""
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: int , UpperCamelCase_: np.ndarray , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Union[float, List[float]] , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: int , ) -> np.ndarray:
"""simple docstring"""
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: ImageInput , UpperCamelCase_: bool = None , UpperCamelCase_: int = None , UpperCamelCase_: bool = None , UpperCamelCase_: int = None , UpperCamelCase_: PILImageResampling = None , UpperCamelCase_: bool = None , UpperCamelCase_: float = None , UpperCamelCase_: bool = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[float, List[float]]] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_: List[str] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(UpperCamelCase_ )
lowercase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowercase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
lowercase__ = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
lowercase__ = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: List[Tuple] = None ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase_ ):
lowercase__ = target_sizes.numpy()
lowercase__ = []
for idx in range(len(UpperCamelCase_ ) ):
lowercase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase_ )
lowercase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase_ )
else:
lowercase__ = logits.argmax(dim=1 )
lowercase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
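# End-to-end usage sketch (illustrative; the Hub checkpoint id and the local file
# name are assumptions, not part of this module):
#
#   from transformers import DPTForSemanticSegmentation, DPTImageProcessor
#   processor = DPTImageProcessor.from_pretrained('Intel/dpt-large-ade')
#   model = DPTForSemanticSegmentation.from_pretrained('Intel/dpt-large-ade')
#   image = PIL.Image.open('scene.jpg')
#   inputs = processor(images=image, return_tensors='pt')
#   with torch.no_grad():
#       outputs = model(**inputs)
#   # target_sizes expects (height, width); PIL's .size is (width, height)
#   maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])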
| 110 |
from __future__ import annotations
from collections.abc import Iterator
class _a :
def __init__( self: List[str] , UpperCamelCase_: int ) -> None:
"""simple docstring"""
lowercase__ = value
lowercase__ = None
lowercase__ = None
class _a :
def __init__( self: Union[str, Any] , UpperCamelCase_: Node ) -> None:
"""simple docstring"""
lowercase__ = tree
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Node | None ) -> int:
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self: List[str] ) -> Iterator[int]:
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
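# Usage sketch (assuming the two classes above carry their annotated names: Node,
# and a depth-first sum iterator, here called BinaryTreeNodeSum for illustration):
#
#   tree = Node(10)
#   tree.left = Node(5)
#   tree.right = Node(-3)
#   assert next(iter(BinaryTreeNodeSum(tree))) == 12  # 10 + 5 + (-3)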
| 110 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this to taking a full-size model and reducing its layers and
# emb dimensions to the minimum while keeping the full vocab + merges files, which leads
# to ~3MB in total for all files. The latter approach is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
A : Optional[int] = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
A : List[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
A : Union[str, Any] = dict(zip(vocab, range(len(vocab))))
A : int = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
A : Optional[Any] = Path(tmpdirname)
A : List[str] = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
A : List[Any] = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
A : Tuple = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, 'w') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, 'w') as fp:
fp.write('\n'.join(merges))
A : List[Any] = FSMTTokenizer(
langs=['en', 'ru'],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
A : Any = FSMTConfig(
langs=['ru', 'en'],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
A : Union[str, Any] = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
A : str = tokenizer(['Making tiny model'], return_tensors='pt')
A : Union[str, Any] = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
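# Typical downstream use (a sketch): the uploaded checkpoint backs fast smoke tests,
# where only the plumbing matters, not translation quality.
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   tok = FSMTTokenizer.from_pretrained('stas/tiny-wmt19-en-ru')
#   mdl = FSMTForConditionalGeneration.from_pretrained('stas/tiny-wmt19-en-ru')
#   out = mdl.generate(**tok(['Making tiny model'], return_tensors='pt'), max_new_tokens=4)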
| 146 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
A : int = trt.Logger(trt.Logger.WARNING)
A : Dict = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
A : Union[str, Any] = logging.getLogger(__name__)
A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
    help='Path to the ONNX model.',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
    help='If null_score - best_non_null is greater than the threshold, predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
A : List[Any] = parser.parse_args()
if args.tokenizer_name:
A : Dict = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
A : Optional[Any] = args.per_device_eval_batch_size
A : Tuple = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
A : Any = True
A : Optional[int] = 'temp_engine/bert-fp32.engine'
if args.fpaa:
A : Union[str, Any] = 'temp_engine/bert-fp16.engine'
if args.inta:
A : Optional[int] = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
A : List[str] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
A : List[str] = [network.get_input(i) for i in range(network.num_inputs)]
A : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
A : Union[str, Any] = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
A : Dict = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
A : int = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def UpperCamelCase ( __magic_name__ : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
lowercase__ = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
lowercase__ = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __magic_name__ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __magic_name__ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __magic_name__ )
# start time
lowercase__ = time.time()
# Run inference
context.execute_async(
bindings=[int(__magic_name__ ) for d_inp in d_inputs] + [int(__magic_name__ ), int(__magic_name__ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(__magic_name__ , __magic_name__ , __magic_name__ )
cuda.memcpy_dtoh_async(__magic_name__ , __magic_name__ , __magic_name__ )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowercase__ = time.time()
lowercase__ = end_time - start_time
lowercase__ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
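# Minimal sketch of the pinned-host + async-copy pattern that model_infer above
# relies on (illustrative; defined but never called by this script, and it needs a
# CUDA device): pinned buffers let memcpy_*_async overlap with compute, and
# stream.synchronize() is the point where results become valid on the host.
def _async_copy_roundtrip_demo() -> None:
    demo_stream = cuda.Stream()
    h_src = cuda.pagelocked_empty(4, dtype=np.float32)  # pinned host source
    h_src[:] = [1.0, 2.0, 3.0, 4.0]
    h_dst = cuda.pagelocked_empty(4, dtype=np.float32)  # pinned host destination
    d_buf = cuda.mem_alloc(h_src.nbytes)  # device scratch buffer
    cuda.memcpy_htod_async(d_buf, h_src, demo_stream)  # enqueue host -> device
    cuda.memcpy_dtoh_async(h_dst, d_buf, demo_stream)  # enqueue device -> host
    demo_stream.synchronize()  # both copies are complete after this
    assert np.allclose(h_src, h_dst)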
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
A : Dict = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A : str = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
A : str = raw_datasets['validation'].column_names
A : Any = 'question' if 'question' in column_names else column_names[0]
A : int = 'context' if 'context' in column_names else column_names[1]
A : Tuple = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
A : Dict = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
A : str = min(args.max_seq_length, tokenizer.model_max_length)
def UpperCamelCase ( __magic_name__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
lowercase__ = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=__magic_name__ , stride=args.doc_stride , return_overflowing_tokens=__magic_name__ , return_offsets_mapping=__magic_name__ , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowercase__ = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowercase__ = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowercase__ = tokenized_examples.sequence_ids(__magic_name__ )
lowercase__ = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowercase__ = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowercase__ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
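# Illustrative sketch (defined but never called by this script) of the stride/overflow
# behaviour that prepare_validation_features depends on: one long context becomes
# several overlapping features, each mapped back to its source example.
def _overflow_tokenization_demo():
    enc = tokenizer(
        ['Who wrote it?'],
        ['a long context ' * 200],
        truncation='only_second',
        max_length=64,
        stride=16,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )
    # every entry of enc['overflow_to_sample_mapping'] is 0 here: all spans come from example 0
    return enc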
A : Optional[Any] = raw_datasets['validation']
# Validation Feature Creation
A : int = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
A : Dict = default_data_collator
A : Union[str, Any] = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
A : Optional[Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def UpperCamelCase ( __magic_name__ : str , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any]="eval" ) -> List[Any]:
"""simple docstring"""
lowercase__ = postprocess_qa_predictions(
examples=__magic_name__ , features=__magic_name__ , predictions=__magic_name__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__magic_name__ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowercase__ = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
lowercase__ = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
lowercase__ = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=__magic_name__ , label_ids=__magic_name__ )
A : Union[str, Any] = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def UpperCamelCase ( __magic_name__ : Any ) -> Union[str, Any]:
"""simple docstring"""
return trt.volume(engine.get_binding_shape(__magic_name__ ) ) * engine.get_binding_dtype(__magic_name__ ).itemsize
# Allocate device memory for inputs and outputs.
A : Union[str, Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
A : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
A : List[str] = cuda.mem_alloc(h_outputa.nbytes)
A : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
A : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
A : List[Any] = 0.0
A : Any = 0
A : str = timeit.default_timer()
A : Tuple = None
for step, batch in enumerate(eval_dataloader):
A , A : Optional[int] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
A , A : int = outputs
A : str = torch.tensor(start_logits)
A : int = torch.tensor(end_logits)
        # necessary to pad predictions and labels so they can be gathered
A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
A : Tuple = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
A : Any = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
A : str = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
A : List[str] = nested_truncate(all_preds, len(eval_dataset))
A : List[Any] = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
logger.info('Total Number of Inference = %d', niter)
A : Dict = post_processing_function(eval_examples, eval_dataset, all_preds)
A : Any = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 146 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=18 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , ) -> List[Any]:
_a = size if size is not None else {'''height''': 18, '''width''': 18}
_a = parent
_a = batch_size
_a = num_channels
_a = image_size
_a = min_resolution
_a = max_resolution
_a = do_resize
_a = size
_a = do_normalize
_a = image_mean
_a = image_std
def _UpperCAmelCase ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __lowerCamelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = DPTImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = DPTImageProcessingTester(self )
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) )
def _UpperCAmelCase ( self ) -> Any:
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
_a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _UpperCAmelCase ( self ) -> Optional[Any]:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _UpperCAmelCase ( self ) -> List[Any]:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
_a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
            ) , )
| 320 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
_a = parent
_a = batch_size
_a = encoder_seq_length
_a = decoder_seq_length
# For common tests
_a = self.decoder_seq_length
_a = is_training
_a = use_attention_mask
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = d_ff
_a = relative_attention_num_buckets
_a = dropout_rate
_a = initializer_factor
_a = eos_token_id
_a = pad_token_id
_a = decoder_start_token_id
_a = None
_a = decoder_layers
def _UpperCAmelCase ( self ) -> Dict:
return TaConfig.from_pretrained('''google/umt5-base''' )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
if attention_mask is None:
_a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase )
if decoder_head_mask is None:
_a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
if cross_attn_head_mask is None:
_a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _UpperCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_a = input_ids.clamp(self.pad_token_id + 1 )
_a = decoder_input_ids.clamp(self.pad_token_id + 1 )
_a = self.get_config()
_a = config.num_attention_heads
_a = self.prepare_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, input_dict
def _UpperCAmelCase ( self ) -> int:
_a , _a = self.prepare_config_and_inputs()
return config, inputs_dict
def _UpperCAmelCase ( self ) -> Tuple:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self ) -> List[str]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict:
_a = UMTaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_a = model(
input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , )
_a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase )
_a = result.last_hidden_state
_a = result.past_key_values
_a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]:
_a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval()
# first forward pass
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_a = model(__UpperCAmelCase )
_a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) )
self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 )
_a , _a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_a = torch.cat([input_ids, next_tokens] , dim=-1 )
_a = model(__UpperCAmelCase )['''last_hidden_state''']
_a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state''']
# select random slice
_a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_a = output_from_no_past[:, -1, random_slice_idx].detach()
_a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]:
_a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval()
_a = model(**__UpperCAmelCase )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() )
@require_torch
class __lowerCamelCase ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
A_ : int = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
A_ : str = True
A_ : List[str] = False
A_ : List[Any] = False
A_ : str = True
A_ : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
A_ : Optional[Any] = [0.8, 0.9]
def _UpperCAmelCase ( self ) -> Tuple:
_a = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def _UpperCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
_a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can't do half precision''' )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
_a = self.model_tester.prepare_config_and_inputs()
_a = config_and_inputs[0]
_a = UMTaForConditionalGeneration(__UpperCAmelCase ).eval()
model.to(__UpperCAmelCase )
_a = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ),
}
for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ):
_a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_a = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase )
_a = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def _UpperCAmelCase ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase )
_a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase )
_a = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
_a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids
# fmt: off
_a = torch.tensor(
[
            [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase )
_a = model.generate(input_ids.to(__UpperCAmelCase ) )
_a = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
_a = tokenizer.batch_decode(__UpperCAmelCase )
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
| 320 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :List[str] ) -> Optional[Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
a__ = [[1, 2, 4], [1, 2, 3, 4]]
a__ = DisjunctiveConstraint(__snake_case )
self.assertTrue(isinstance(dc.token_ids ,__snake_case ) )
with self.assertRaises(__snake_case ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__snake_case ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def lowerCamelCase__( self :int ) -> int:
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
a__ = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__snake_case ):
DisjunctiveConstraint(__snake_case ) # fails here
def lowerCamelCase__( self :Optional[Any] ) -> int:
a__ = [[1, 2, 3], [1, 2, 4]]
a__ = DisjunctiveConstraint(__snake_case )
a__ , a__ , a__ = dc.update(1 )
a__ = stepped is True and completed is False and reset is False
self.assertTrue(__snake_case )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
a__ , a__ , a__ = dc.update(2 )
a__ = stepped is True and completed is False and reset is False
self.assertTrue(__snake_case )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
a__ , a__ , a__ = dc.update(3 )
a__ = stepped is True and completed is True and reset is False
self.assertTrue(__snake_case )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowerCamelCase__( self :Any ) -> str:
a__ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
a__ = DisjunctiveConstraint(__snake_case )
a__ , a__ , a__ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
a__ , a__ , a__ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
a__ , a__ , a__ = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
a__ , a__ , a__ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
a__ , a__ , a__ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
a__ , a__ , a__ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
a__ , a__ , a__ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
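# Usage sketch (illustrative; the checkpoint id is an assumption): in generation, a
# DisjunctiveConstraint forces one of several tokenizations of a word to appear in
# the output, under constrained beam search (num_beams > 1 is required).
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained('t5-small')
#   mdl = AutoModelForSeq2SeqLM.from_pretrained('t5-small')
#   branches = [tok('rain', add_special_tokens=False).input_ids,
#               tok('rainy', add_special_tokens=False).input_ids]
#   out = mdl.generate(**tok('translate English to German: It is raining.', return_tensors='pt'),
#                      constraints=[DisjunctiveConstraint(branches)], num_beams=4)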
| 109 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Tuple = logging.get_logger(__name__)
snake_case : List[Any] = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : int = '''efficientformer'''
def __init__( self :List[str] ,__snake_case :List[int] = [3, 2, 6, 4] ,__snake_case :List[int] = [48, 96, 2_24, 4_48] ,__snake_case :List[bool] = [True, True, True, True] ,__snake_case :int = 4_48 ,__snake_case :int = 32 ,__snake_case :int = 4 ,__snake_case :int = 7 ,__snake_case :int = 5 ,__snake_case :int = 8 ,__snake_case :int = 4 ,__snake_case :float = 0.0 ,__snake_case :int = 16 ,__snake_case :int = 3 ,__snake_case :int = 3 ,__snake_case :int = 3 ,__snake_case :int = 2 ,__snake_case :int = 1 ,__snake_case :float = 0.0 ,__snake_case :int = 1 ,__snake_case :bool = True ,__snake_case :bool = True ,__snake_case :float = 1E-5 ,__snake_case :str = "gelu" ,__snake_case :float = 0.02 ,__snake_case :float = 1E-12 ,__snake_case :int = 2_24 ,__snake_case :float = 1E-05 ,**__snake_case :Dict ,) -> None:
super().__init__(**__snake_case )
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = hidden_sizes
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = initializer_range
a__ = layer_norm_eps
a__ = patch_size
a__ = num_channels
a__ = depths
a__ = mlp_expansion_ratio
a__ = downsamples
a__ = dim
a__ = key_dim
a__ = attention_ratio
a__ = resolution
a__ = pool_size
a__ = downsample_patch_size
a__ = downsample_stride
a__ = downsample_pad
a__ = drop_path_rate
a__ = num_metaad_blocks
a__ = distillation
a__ = use_layer_scale
a__ = layer_scale_init_value
a__ = image_size
a__ = batch_norm_eps
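# Hedged usage sketch: assuming the class above is transformers'
# EfficientFormerConfig (its upstream name), it can be instantiated with its
# defaults and individual fields overridden by keyword.
from transformers import EfficientFormerConfig

config = EfficientFormerConfig(hidden_sizes=[48, 96, 224, 448], num_hidden_layers=7)
print(config.model_type)  # "efficientformer"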
| 109 | 1 |
import math
def A_ ( a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [True] * n
SCREAMING_SNAKE_CASE_ : Tuple = False
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : int = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
SCREAMING_SNAKE_CASE_ : str = i * 2
while index < n:
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = index + i
SCREAMING_SNAKE_CASE_ : Optional[int] = [2]
for i in range(3 , a , 2 ):
if is_prime[i]:
primes.append(a )
return primes
def A_ ( a = 9_9_9_9_6_6_6_6_3_3_3_3 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = math.floor(math.sqrt(a ) ) + 1_0_0
SCREAMING_SNAKE_CASE_ : Any = prime_sieve(a )
SCREAMING_SNAKE_CASE_ : Tuple = 0
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : List[str] = primes[prime_index]
while (last_prime**2) <= limit:
SCREAMING_SNAKE_CASE_ : Any = primes[prime_index + 1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = last_prime**2
SCREAMING_SNAKE_CASE_ : List[str] = next_prime**2
# Get numbers divisible by lps(current)
SCREAMING_SNAKE_CASE_ : Optional[int] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
SCREAMING_SNAKE_CASE_ : List[Any] = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
SCREAMING_SNAKE_CASE_ : Tuple = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
SCREAMING_SNAKE_CASE_ : List[str] = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
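# Worked illustration of "semidivisible" as summed above: for n = 30,
# sqrt(30) ~ 5.48, so lps(30) = 5 and ups(30) = 7. Exactly one of the two
# (here 5) divides 30, so 30 contributes to the total.
n = 30
assert (n % 5 == 0) != (n % 7 == 0)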
| 253 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Any = {'vocab_file': 'spiece.model'}
lowerCAmelCase : Tuple = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
lowerCAmelCase : Optional[int] = {'bert_for_seq_generation': 5_12}
class _A ( __magic_name__):
SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[int] = []
SCREAMING_SNAKE_CASE : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<::::>" , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ : List[str] = vocab_file
SCREAMING_SNAKE_CASE_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : List[Any] = None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.piece_to_id(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
return token
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
SCREAMING_SNAKE_CASE_ : Optional[int] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
SCREAMING_SNAKE_CASE_ : Optional[int] = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
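# Hedged usage sketch: a sentencepiece-backed tokenizer with the structure
# above supports the usual encode/decode round trip. The checkpoint name
# comes from the vocab map above; its availability is assumed, not verified.
from transformers import BertGenerationTokenizer

tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
ids = tok("Hello world").input_ids
print(tok.decode(ids))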
| 253 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any]=13 , UpperCamelCase : List[str]=7 , UpperCamelCase : Any=True , UpperCamelCase : Dict=True , UpperCamelCase : Any=True , UpperCamelCase : Any=True , UpperCamelCase : Tuple=99 , UpperCamelCase : List[str]=32 , UpperCamelCase : int=5 , UpperCamelCase : Dict=4 , UpperCamelCase : Dict=37 , UpperCamelCase : Dict="gelu" , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : Optional[int]=5_12 , UpperCamelCase : int=16 , UpperCamelCase : List[str]=2 , UpperCamelCase : str=0.02 , UpperCamelCase : Tuple=4 , ):
'''simple docstring'''
_snake_case : List[Any] = parent
_snake_case : List[str] = batch_size
_snake_case : Any = seq_length
_snake_case : str = is_training
_snake_case : int = use_attention_mask
_snake_case : Optional[int] = use_token_type_ids
_snake_case : Tuple = use_labels
_snake_case : Any = vocab_size
_snake_case : Any = hidden_size
_snake_case : Tuple = num_hidden_layers
_snake_case : Tuple = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : str = hidden_act
_snake_case : Optional[Any] = hidden_dropout_prob
_snake_case : int = attention_probs_dropout_prob
_snake_case : Any = max_position_embeddings
_snake_case : Optional[Any] = type_vocab_size
_snake_case : Dict = type_sequence_label_size
_snake_case : Tuple = initializer_range
_snake_case : List[str] = num_choices
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : Tuple = None
if self.use_attention_mask:
_snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : Dict = None
if self.use_token_type_ids:
_snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case : Tuple = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : int = self.prepare_config_and_inputs()
_snake_case : Dict = config_and_inputs
_snake_case : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
a_ : int =True
a_ : List[Any] =(
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : List[Any] = FlaxRoFormerModelTester(self )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_snake_case : int = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=_lowerCamelCase )
_snake_case : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCamelCase )
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : int = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_snake_case : Tuple = jnp.array([[0, 1, 2, 3, 4, 5]] )
_snake_case : Optional[Any] = model(_lowerCamelCase )[0]
_snake_case : Optional[Any] = 5_00_00
_snake_case : Dict = (1, 6, vocab_size)
self.assertEqual(output.shape , _lowerCamelCase )
_snake_case : Any = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
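# Minimal inference sketch under the same assumptions as the slow test above
# (flax installed, checkpoint reachable); shapes follow the test exactly.
import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
print(logits.shape)  # (1, 6, 50000)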
| 354 |
lowerCAmelCase_ = 256
# Modulus to hash a string
lowerCAmelCase_ = 100_0003
def lowerCamelCase_ ( lowerCAmelCase: str , lowerCAmelCase: str )-> bool:
_snake_case : Optional[int] = len(lowerCAmelCase )
_snake_case : int = len(lowerCAmelCase )
if p_len > t_len:
return False
_snake_case : str = 0
_snake_case : Optional[int] = 0
_snake_case : Union[str, Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowerCAmelCase ):
_snake_case : Union[str, Any] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_snake_case : Dict = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_snake_case : Union[str, Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_snake_case : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def lowerCamelCase_ ( )-> None:
# Test 1)
_snake_case : int = 'abc1abc12'
_snake_case : Optional[int] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
_snake_case : Tuple = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(lowerCAmelCase , lowerCAmelCase ) and not rabin_karp(lowerCAmelCase , lowerCAmelCase )
# Test 2)
_snake_case : List[str] = 'ABABX'
_snake_case : Optional[Any] = 'ABABZABABYABABX'
assert rabin_karp(lowerCAmelCase , lowerCAmelCase )
# Test 3)
_snake_case : Tuple = 'AAAB'
_snake_case : Dict = 'ABAAAAAB'
assert rabin_karp(lowerCAmelCase , lowerCAmelCase )
# Test 4)
_snake_case : List[Any] = 'abcdabcy'
_snake_case : Dict = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(lowerCAmelCase , lowerCAmelCase )
# Test 5)
_snake_case : Optional[int] = 'Lü'
_snake_case : Optional[int] = 'Lüsai'
assert rabin_karp(lowerCAmelCase , lowerCAmelCase )
_snake_case : Any = 'Lue'
assert not rabin_karp(lowerCAmelCase , lowerCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
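# Worked example of the rolling-hash update used above: drop the leading
# character, shift, and append the next character, all in O(1) per step.
alphabet_size, modulus = 256, 1000003

def poly_hash(s):
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h

power = pow(alphabet_size, len("abc") - 1, modulus)
rolled = ((poly_hash("abc") - ord("a") * power) * alphabet_size + ord("d")) % modulus
assert rolled == poly_hash("bcd")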
| 260 | 0 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class A_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
lowerCAmelCase__ = VQModel
lowerCAmelCase__ = """sample"""
@property
def _lowerCAmelCase (self :Any , _UpperCamelCase :Optional[int]=(32, 32) )-> Any:
__A = 4
__A = 3
__A = floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCamelCase )
return {"sample": image}
@property
def _lowerCAmelCase (self :Tuple )-> Union[str, Any]:
return (3, 32, 32)
@property
def _lowerCAmelCase (self :Tuple )-> List[Any]:
return (3, 32, 32)
def _lowerCAmelCase (self :Optional[int] )-> Optional[Any]:
__A = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
__A = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase (self :Optional[Any] )-> Any:
pass
def _lowerCAmelCase (self :List[Any] )-> Optional[int]:
pass
def _lowerCAmelCase (self :Optional[int] )-> List[str]:
__A , __A = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_UpperCamelCase )
__A = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _lowerCAmelCase (self :int )-> List[str]:
__A = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(_UpperCamelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
__A = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
__A = image.to(_UpperCamelCase )
with torch.no_grad():
__A = model(_UpperCamelCase ).sample
__A = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
__A = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
# fmt: on
self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
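# Hedged sketch: a VQModel built from the dummy init dict above runs a full
# encode/decode round trip on a random image-shaped tensor.
import torch
from diffusers import VQModel

model = VQModel(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=3,
)
with torch.no_grad():
    out = model(torch.randn(1, 3, 32, 32)).sample
print(out.shape)  # torch.Size([1, 3, 32, 32])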
| 117 |
import heapq
import sys
import numpy as np
snake_case__ : Tuple = tuple[int, int]
class A_ :
def __init__(self :Union[str, Any] )-> Union[str, Any]:
__A = []
__A = set()
def _lowerCAmelCase (self :List[str] )-> str:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def _lowerCAmelCase (self :Any )-> Any:
return len(self.elements ) == 0
def _lowerCAmelCase (self :List[str] , _UpperCamelCase :Optional[Any] , _UpperCamelCase :Optional[int] )-> Optional[int]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_UpperCamelCase )
else:
# update
# print("update", item)
__A = []
((__A) , (__A)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__A) , (__A)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _lowerCAmelCase (self :int , _UpperCamelCase :int )-> int:
if item in self.set:
self.set.remove(_UpperCamelCase )
__A = []
((__A) , (__A)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__A) , (__A)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _lowerCAmelCase (self :Optional[Any] )-> int:
return self.elements[0][1]
def _lowerCAmelCase (self :List[str] )-> List[Any]:
((__A) , (__A)) = heapq.heappop(self.elements )
self.set.remove(_UpperCamelCase )
return (priority, item)
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos ) -> Any:
'''simple docstring'''
__A = np.array(lowerCamelCase )
__A = np.array(lowerCamelCase )
return np.linalg.norm(a - b )
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos ) -> List[str]:
'''simple docstring'''
return consistent_heuristic(lowerCamelCase , lowerCamelCase ) // t
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos ) -> Any:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _a ( lowerCamelCase: TPos , lowerCamelCase: int , lowerCamelCase: TPos , lowerCamelCase: dict[TPos, float] ) -> Tuple:
'''simple docstring'''
__A = g_function[start] + Wa * heuristics[i](lowerCamelCase , lowerCamelCase )
return ans
def _a ( lowerCamelCase: Tuple , lowerCamelCase: Optional[int] , lowerCamelCase: Optional[int] ) -> Tuple:
'''simple docstring'''
__A = np.chararray((n, n) )
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
__A = '''*'''
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (j, (n - 1) - i) in blocks:
__A = '''#'''
__A = '''-'''
__A = back_pointer[goal]
while x != start:
((__A) , (__A)) = x
# print(x)
__A = '''-'''
__A = back_pointer[x]
__A = '''-'''
for i in range(lowerCamelCase ):
for j in range(lowerCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__A = back_pointer[goal]
while x != start:
print(lowerCamelCase , end=''' ''' )
__A = back_pointer[x]
print(lowerCamelCase )
sys.exit()
def _a ( lowerCamelCase: TPos ) -> Optional[Any]:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _a ( lowerCamelCase: int , lowerCamelCase: Optional[int] , lowerCamelCase: Tuple , lowerCamelCase: Tuple , lowerCamelCase: Optional[int] , lowerCamelCase: str , lowerCamelCase: List[Any] , lowerCamelCase: Dict , ) -> Dict:
'''simple docstring'''
for itera in range(lowerCamelCase ):
open_list[itera].remove_element(lowerCamelCase )
# print("s", s)
# print("j", j)
((__A) , (__A)) = s
__A = (x - 1, y)
__A = (x + 1, y)
__A = (x, y + 1)
__A = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase )
__A = -1
__A = float('''inf''' )
if valid(lowerCamelCase ) and g_function[neighbours] > g_function[s] + 1:
__A = g_function[s] + 1
__A = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase , key(lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase ):
if key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) <= Wa * key(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase ):
open_list[j].put(
lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
def _a ( ) -> str:
'''simple docstring'''
__A = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
snake_case__ : Union[str, Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
snake_case__ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
snake_case__ : List[str] = make_common_ground()
snake_case__ : int = blocks_blk
# hyper parameters
snake_case__ : Optional[int] = 1
snake_case__ : List[Any] = 1
snake_case__ : int = 20
snake_case__ : Optional[int] = 3 # one consistent and two other inconsistent
# start and end destination
snake_case__ : Tuple = (0, 0)
snake_case__ : Any = (n - 1, n - 1)
snake_case__ : List[Any] = 1
def _a ( lowerCamelCase: TPos , lowerCamelCase: TPos , lowerCamelCase: int ) -> Union[str, Any]:
'''simple docstring'''
__A = {start: 0, goal: float('''inf''' )}
__A = {start: -1, goal: -1}
__A = []
__A = set()
for i in range(lowerCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase , key(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
__A = []
__A = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , lowerCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__A , __A = open_list[i].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_inad.append(lowerCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__A = open_list[0].top_show()
visited.add(lowerCamelCase )
expand_state(
lowerCamelCase , 0 , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
close_list_anchor.append(lowerCamelCase )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
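# Self-contained sketch of the three heuristics the search above relies on
# (the mangled defs collapse their names): one consistent Euclidean heuristic
# plus two deliberately inconsistent variants for the extra queues.
import numpy as np

def euclidean(p, q):
    return np.linalg.norm(np.array(p) - np.array(q))

def scaled(p, q, t=1):
    return euclidean(p, q) // t  # floor division makes this inconsistent

def manhattan(p, q):
    return abs(p[0] - q[0]) + abs(p[1] - q[1])

print(euclidean((0, 0), (19, 19)), manhattan((0, 0), (19, 19)))  # ~26.87, 38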
| 117 | 1 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def lowercase__ ( lowercase_ ,lowercase_ = 0.0 ,lowercase_ = 1.0 ) -> int:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
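# Sanity checks for the density above (restated locally under the assumed
# name gaussian, since the def above is mangled): the peak at x = mu equals
# 1 / (sigma * sqrt(2 * pi)) and the curve integrates to ~1.
import numpy as np

def gaussian(x, mu=0.0, sigma=1.0):
    return 1 / np.sqrt(2 * np.pi * sigma**2) * np.exp(-((x - mu) ** 2) / (2 * sigma**2))

xs = np.linspace(-10, 10, 100001)
assert np.isclose(gaussian(0.0), 1 / np.sqrt(2 * np.pi))
assert abs(gaussian(xs).sum() * (xs[1] - xs[0]) - 1.0) < 1e-6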
| 361 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[Any] = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : Any = _sin / (2 * q_factor)
_UpperCamelCase : str = (1 - _cos) / 2
_UpperCamelCase : Any = 1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : List[str] = -2 * _cos
_UpperCamelCase : Tuple = 1 - alpha
_UpperCamelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : List[str] = tau * frequency / samplerate
_UpperCamelCase : str = sin(lowercase_ )
_UpperCamelCase : Optional[Any] = cos(lowercase_ )
_UpperCamelCase : Dict = _sin / (2 * q_factor)
_UpperCamelCase : List[Any] = (1 + _cos) / 2
_UpperCamelCase : Optional[int] = -1 - _cos
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : str = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Tuple = tau * frequency / samplerate
_UpperCamelCase : Optional[int] = sin(lowercase_ )
_UpperCamelCase : Dict = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Dict = _sin / 2
_UpperCamelCase : int = 0
_UpperCamelCase : str = -ba
_UpperCamelCase : List[str] = 1 + alpha
_UpperCamelCase : Optional[int] = -2 * _cos
_UpperCamelCase : Optional[Any] = 1 - alpha
_UpperCamelCase : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : str = tau * frequency / samplerate
_UpperCamelCase : Optional[Any] = sin(lowercase_ )
_UpperCamelCase : Optional[int] = cos(lowercase_ )
_UpperCamelCase : int = _sin / (2 * q_factor)
_UpperCamelCase : List[str] = 1 - alpha
_UpperCamelCase : int = -2 * _cos
_UpperCamelCase : Union[str, Any] = 1 + alpha
_UpperCamelCase : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : int = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : List[Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Optional[int] = 10 ** (gain_db / 40)
_UpperCamelCase : str = 1 + alpha * big_a
_UpperCamelCase : Union[str, Any] = -2 * _cos
_UpperCamelCase : Optional[int] = 1 - alpha * big_a
_UpperCamelCase : int = 1 + alpha / big_a
_UpperCamelCase : Optional[Any] = -2 * _cos
_UpperCamelCase : Any = 1 - alpha / big_a
_UpperCamelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = tau * frequency / samplerate
_UpperCamelCase : Any = sin(lowercase_ )
_UpperCamelCase : Union[str, Any] = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : Union[str, Any] = 10 ** (gain_db / 40)
_UpperCamelCase : Dict = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : int = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : int = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : List[str] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : Any = big_a * (pmc + aaa)
_UpperCamelCase : Dict = 2 * big_a * mpc
_UpperCamelCase : str = big_a * (pmc - aaa)
_UpperCamelCase : Dict = ppmc + aaa
_UpperCamelCase : List[Any] = -2 * pmpc
_UpperCamelCase : Dict = ppmc - aaa
_UpperCamelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ = 1 / sqrt(2 ) ,) -> IIRFilter:
"""simple docstring"""
_UpperCamelCase : Optional[int] = tau * frequency / samplerate
_UpperCamelCase : int = sin(lowercase_ )
_UpperCamelCase : Any = cos(lowercase_ )
_UpperCamelCase : str = _sin / (2 * q_factor)
_UpperCamelCase : str = 10 ** (gain_db / 40)
_UpperCamelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
_UpperCamelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
_UpperCamelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos
_UpperCamelCase : Dict = (big_a - 1) + (big_a + 1) * _cos
_UpperCamelCase : Optional[Any] = 2 * sqrt(lowercase_ ) * alpha
_UpperCamelCase : List[Any] = big_a * (ppmc + aaa)
_UpperCamelCase : Dict = -2 * big_a * pmpc
_UpperCamelCase : Dict = big_a * (ppmc - aaa)
_UpperCamelCase : Optional[Any] = pmc + aaa
_UpperCamelCase : Any = 2 * mpc
_UpperCamelCase : Any = pmc - aaa
_UpperCamelCase : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] ,[ba, ba, ba] )
return filt
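# Worked numeric example for the low-pass design above (the standard RBJ
# biquad) at f = 1 kHz, fs = 48 kHz, Q = 1/sqrt(2); prints the a-side then
# b-side coefficients that would be handed to IIRFilter.set_coefficients.
from math import cos, sin, sqrt, tau

fs, f, q = 48000, 1000, 1 / sqrt(2)
w0 = tau * f / fs
alpha = sin(w0) / (2 * q)
a = [1 + alpha, -2 * cos(w0), 1 - alpha]
b = [(1 - cos(w0)) / 2, 1 - cos(w0), (1 - cos(w0)) / 2]
print([round(c, 6) for c in a], [round(c, 6) for c in b])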
| 310 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __magic_name__ ( __lowerCAmelCase):
A: Union[str, Any] = "dandelin/vilt-b32-finetuned-vqa"
A: Union[str, Any] = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
A: Optional[Any] = "image_qa"
A: Dict = AutoProcessor
A: Tuple = AutoModelForVisualQuestionAnswering
A: str = ["image", "text"]
A: str = ["text"]
def __init__( self : List[str] , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['''vision'''] )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : "Image" , lowerCamelCase__ : str ) -> List[Any]:
'''simple docstring'''
return self.pre_processor(lowerCamelCase__ , lowerCamelCase__ , return_tensors='''pt''' )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : List[str] ) -> Any:
'''simple docstring'''
with torch.no_grad():
return self.model(**lowerCamelCase__ ).logits
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Dict ) -> str:
'''simple docstring'''
UpperCamelCase__ : List[Any] = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
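# Hedged usage sketch: under its upstream name (ImageQuestionAnsweringTool),
# the tool above answers a free-form question about a local image. The import
# path, file name and question are illustrative assumptions.
from PIL import Image
from transformers.tools import ImageQuestionAnsweringTool  # module path assumed

tool = ImageQuestionAnsweringTool()
print(tool(image=Image.open("photo.jpg"), question="What is in the photo?"))  # placeholder image path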
| 146 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__UpperCamelCase : str = logging.getLogger(__name__)
def _a ( SCREAMING_SNAKE_CASE : torch.nn.Module , SCREAMING_SNAKE_CASE : BnbQuantizationConfig , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] = None , SCREAMING_SNAKE_CASE : Optional[Dict[str, Union[int, str, torch.device]]] = None , SCREAMING_SNAKE_CASE : Optional[List[str]] = None , SCREAMING_SNAKE_CASE : Optional[Dict[Union[int, str], Union[int, str]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE : bool = False , ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = bnb_quantization_config.load_in_abit
UpperCamelCase__ : List[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
UpperCamelCase__ : int = []
# custom device map
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(device_map.keys() ) > 1:
UpperCamelCase__ : int = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCamelCase__ : List[Any] = get_keys_to_not_convert(SCREAMING_SNAKE_CASE )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : List[Any] = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(SCREAMING_SNAKE_CASE )
# compatibility with peft
UpperCamelCase__ : Optional[Any] = load_in_abit
UpperCamelCase__ : List[str] = load_in_abit
UpperCamelCase__ : str = get_parameter_device(SCREAMING_SNAKE_CASE )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
UpperCamelCase__ : Union[str, Any] = replace_with_bnb_layers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
# convert param to the right dtype
UpperCamelCase__ : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCamelCase__ : Union[str, Any] = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
UpperCamelCase__ : Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(SCREAMING_SNAKE_CASE ):
param.to(SCREAMING_SNAKE_CASE )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"The model device type is {model_device.type}. However, cuda is needed for quantization."
''' We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
UpperCamelCase__ : str = replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , modules_to_not_convert=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = get_quantized_model_device_map(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , max_memory=SCREAMING_SNAKE_CASE , no_split_module_classes=SCREAMING_SNAKE_CASE , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCamelCase__ : Dict = True
UpperCamelCase__ : str = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE , offload_state_dict=SCREAMING_SNAKE_CASE , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(SCREAMING_SNAKE_CASE , device_map=SCREAMING_SNAKE_CASE , offload_dir=SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : str=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
UpperCamelCase__ : int = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
UpperCamelCase__ : str = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCamelCase__ : Optional[Any] = {}
UpperCamelCase__ : Union[str, Any] = special_dtypes
UpperCamelCase__ : Optional[int] = no_split_module_classes
UpperCamelCase__ : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCamelCase__ : Dict = get_balanced_memory(
SCREAMING_SNAKE_CASE , low_zero=(device_map == '''balanced_low_0''') , max_memory=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : Tuple = max_memory
UpperCamelCase__ : Dict = infer_auto_device_map(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
UpperCamelCase__ : List[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCamelCase__ : Dict = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : int=None ):
"""simple docstring"""
if modules_to_not_convert is None:
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ , UpperCamelCase__ : Dict = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _a ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
"""simple docstring"""
UpperCamelCase__ : str = False
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase__ : Tuple = []
current_key_name.append(SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCamelCase__ : int = '''.'''.join(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCamelCase__ : List[str] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCamelCase__ : int = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCamelCase__ : Optional[int] = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
UpperCamelCase__ : List[Any] = module.weight.data
if module.bias is not None:
UpperCamelCase__ : List[str] = module.bias.data
bnb_module.requires_grad_(SCREAMING_SNAKE_CASE )
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = True
if len(list(module.children() ) ) > 0:
UpperCamelCase__ , UpperCamelCase__ : Tuple = _replace_with_bnb_layers(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _a ( SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
with init_empty_weights():
UpperCamelCase__ : Dict = deepcopy(SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
UpperCamelCase__ : str = find_tied_parameters(SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : List[str] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase__ : int = sum(SCREAMING_SNAKE_CASE , [] )
UpperCamelCase__ : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
UpperCamelCase__ : str = False
if hasattr(SCREAMING_SNAKE_CASE , '''base_model_prefix''' ):
UpperCamelCase__ : int = not hasattr(SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase__ : Tuple = list(model.named_children() )
UpperCamelCase__ : str = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase__ : Dict = set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = list(set(SCREAMING_SNAKE_CASE ) ) + list(SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
UpperCamelCase__ : int = ['''.weight''', '''.bias''']
UpperCamelCase__ : Optional[int] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase__ : int = name.replace(SCREAMING_SNAKE_CASE , '''''' )
filtered_module_names.append(SCREAMING_SNAKE_CASE )
return filtered_module_names
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
for m in model.modules():
if isinstance(SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
return True
return False
def _a ( SCREAMING_SNAKE_CASE : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 0 , dtype=SCREAMING_SNAKE_CASE , value=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = param_name
UpperCamelCase__ : str = model
if "." in tensor_name:
UpperCamelCase__ : List[Any] = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase__ : Dict = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
UpperCamelCase__ : Optional[int] = new_module
UpperCamelCase__ : List[str] = splits[-1]
# offload weights
UpperCamelCase__ : Any = False
offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE , )
else:
offload_weight(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
offload_weight(SCREAMING_SNAKE_CASE , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE , index=SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''meta''' , dtype=SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
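# Hedged end-to-end sketch of the entry point above (upstream name
# load_and_quantize_model in accelerate). Requires a CUDA GPU, bitsandbytes,
# and a real checkpoint directory; the weights path below is a placeholder.
import torch.nn as nn
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

with init_empty_weights():
    empty_model = nn.Sequential(nn.Linear(1024, 1024), nn.Linear(1024, 1024))

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/checkpoint",  # placeholder
    device_map="auto",
)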
| 146 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 351 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=1_024 , __lowerCAmelCase=1_024 , __lowerCAmelCase=False , **__lowerCAmelCase ):
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : List[str] = SeqaSeqDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , type_path="train" , **__lowerCAmelCase )
_UpperCAmelCase : Dict = tok.pad_token_id
def get_lens(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = tqdm(
DataLoader(__lowerCAmelCase , batch_size=512 , num_workers=8 , shuffle=__lowerCAmelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_UpperCAmelCase : List[str] = []
for batch in dl:
_UpperCAmelCase : Any = batch["input_ids"].ne(__lowerCAmelCase ).sum(1 ).tolist()
_UpperCAmelCase : Tuple = batch["labels"].ne(__lowerCAmelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__lowerCAmelCase , __lowerCAmelCase ):
max_lens.append(max(__lowerCAmelCase , __lowerCAmelCase ) )
else:
max_lens.extend(__lowerCAmelCase )
return max_lens
_UpperCAmelCase : Dict = get_lens(__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = SeqaSeqDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , type_path="val" , **__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = get_lens(__lowerCAmelCase )
pickle_save(__lowerCAmelCase , train_ds.len_file )
pickle_save(__lowerCAmelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
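# Illustrative invocation: fire exposes the function's parameters as CLI
# arguments, so (assuming this file is saved as save_len_file.py) the train
# and val length files can be produced with something like:
#
#   python save_len_file.py t5-small /path/to/dataset --max_source_length 1024 --max_target_length 1024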
| 322 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(transformer=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
# create a imagenet -> id dictionary for easier use
UpperCAmelCase : Tuple = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
UpperCAmelCase : Tuple = int(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = dict(sorted(self.labels.items() ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[int]:
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : List[Any] = list(_SCREAMING_SNAKE_CASE )
for l in label:
if l not in self.labels:
raise ValueError(
F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}." )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 50 , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
UpperCAmelCase : List[str] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = self.transformer.config.sample_size
UpperCAmelCase : Optional[Any] = self.transformer.config.in_channels
UpperCAmelCase : List[Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=self.transformer.dtype , )
UpperCAmelCase : Tuple = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
UpperCAmelCase : str = torch.tensor(_SCREAMING_SNAKE_CASE , device=self.device ).reshape(-1 )
UpperCAmelCase : int = torch.tensor([1000] * batch_size , device=self.device )
UpperCAmelCase : Optional[int] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
UpperCAmelCase : str = latent_model_input[: len(_SCREAMING_SNAKE_CASE ) // 2]
UpperCAmelCase : Optional[Any] = torch.cat([half, half] , dim=0 )
UpperCAmelCase : Optional[int] = self.scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = t
if not torch.is_tensor(_SCREAMING_SNAKE_CASE ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
UpperCAmelCase : Union[str, Any] = latent_model_input.device.type == """mps"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Dict = torch.floataa if is_mps else torch.floataa
else:
UpperCAmelCase : Dict = torch.intaa if is_mps else torch.intaa
UpperCAmelCase : Optional[Any] = torch.tensor([timesteps] , dtype=_SCREAMING_SNAKE_CASE , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
UpperCAmelCase : Optional[Any] = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase : int = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
UpperCAmelCase : int = self.transformer(
_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , class_labels=_SCREAMING_SNAKE_CASE ).sample
# perform guidance
if guidance_scale > 1:
UpperCAmelCase , UpperCAmelCase : List[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
UpperCAmelCase , UpperCAmelCase : Dict = torch.split(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) // 2 , dim=0 )
UpperCAmelCase : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
UpperCAmelCase : Optional[Any] = torch.cat([half_eps, half_eps] , dim=0 )
UpperCAmelCase : Optional[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
UpperCAmelCase , UpperCAmelCase : Optional[int] = torch.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dim=1 )
else:
UpperCAmelCase : str = noise_pred
# compute previous image: x_t -> x_t-1
UpperCAmelCase : Any = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
if guidance_scale > 1:
UpperCAmelCase , UpperCAmelCase : str = latent_model_input.chunk(2 , dim=0 )
else:
UpperCAmelCase : Optional[int] = latent_model_input
UpperCAmelCase : Optional[int] = 1 / self.vae.config.scaling_factor * latents
UpperCAmelCase : Tuple = self.vae.decode(_SCREAMING_SNAKE_CASE ).sample
UpperCAmelCase : Any = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCAmelCase : Tuple = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase : List[Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
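# Hedged usage sketch under the upstream name DiTPipeline; assumes the
# facebook/DiT-XL-2-256 checkpoint and a CUDA device are available.
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
class_ids = pipe.get_label_ids(["white shark"])
image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]
image.save("dit_sample.png")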
| 109 |
"""simple docstring"""
from math import isqrt, loga
def _snake_case ( UpperCamelCase : int ):
UpperCAmelCase : Any = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCamelCase , UpperCamelCase ):
UpperCAmelCase : str = False
return [i for i in range(2 , UpperCamelCase ) if is_prime[i]]
def _snake_case ( UpperCamelCase : int = 800800 , UpperCamelCase : int = 800800 ):
UpperCAmelCase : Union[str, Any] = degree * loga(UpperCamelCase )
UpperCAmelCase : int = int(UpperCamelCase )
UpperCAmelCase : Union[str, Any] = calculate_prime_numbers(UpperCamelCase )
UpperCAmelCase : Dict = 0
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : Dict = len(UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109 | 1 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations to run."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
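To make the confidence filter above concrete, here is a tiny self-contained sketch with an in-memory `datasets.Dataset`; the column names mirror what the script expects, but every value is invented:

from datasets import Dataset

toy = Dataset.from_dict(
    {"prediction": [0, 1, 1], "label": [0, 0, 1], "probability": [0.95, 0.40, 0.80]}
)
# Keep only pseudo-labels the model is confident about (threshold is illustrative).
kept = toy.filter(lambda example: example["probability"] > 0.5)
print(kept["probability"])  # [0.95, 0.8]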
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-train a pre-trained model on a downstream task."""
    # Initialize the accelerator; it will handle device placement for us.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files.get("eval"),
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
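A hypothetical invocation of `selftrain` (every path and hyperparameter below is a placeholder, not a value taken from this script):

if __name__ == "__main__":
    selftrain(
        model_name_or_path="bert-base-uncased",
        train_file="data/train.csv",
        infer_file="data/unlabeled.csv",
        output_dir="outputs/self-training",
        eval_file="data/eval.csv",  # required whenever evaluation_strategy != "no"
        evaluation_strategy="epoch",
        max_selftrain_iterations=3,
    )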
| 359 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Creates a pair of train/eval `DataLoader`s for GLUE MRPC using the
    "bert-base-cased" tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
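The gradient-accumulation arithmetic in `training_function` is easy to check by hand: a requested batch size of 64 with `MAX_GPU_BATCH_SIZE = 16` yields 4 micro-batches per optimizer step, preserving the effective batch size. A minimal sketch:

requested = 64
MAX_GPU_BATCH_SIZE = 16

gradient_accumulation_steps = 1
if requested > MAX_GPU_BATCH_SIZE:
    gradient_accumulation_steps = requested // MAX_GPU_BATCH_SIZE
    requested = MAX_GPU_BATCH_SIZE

assert gradient_accumulation_steps == 4
assert requested * gradient_accumulation_steps == 64  # effective batch size is preserved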
| 110 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _a ( lowerCAmelCase , lowerCAmelCase):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[Any] , __UpperCamelCase : int = 7_6_8 , )->List[str]:
super().__init__()
_UpperCAmelCase = nn.Parameter(torch.zeros(1 , __UpperCamelCase ) )
_UpperCAmelCase = nn.Parameter(torch.ones(1 , __UpperCamelCase ) )
def lowercase__ ( self : Dict , __UpperCamelCase : Optional[Union[str, torch.device]] = None , __UpperCamelCase : Optional[torch.dtype] = None , )->List[Any]:
_UpperCAmelCase = nn.Parameter(self.mean.to(__UpperCamelCase ).to(__UpperCamelCase ) )
_UpperCAmelCase = nn.Parameter(self.std.to(__UpperCamelCase ).to(__UpperCamelCase ) )
return self
def lowercase__ ( self : str , __UpperCamelCase : Any )->List[Any]:
_UpperCAmelCase = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : List[str] )->Dict:
_UpperCAmelCase = (embeds * self.std) + self.mean
return embeds
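A quick round-trip check of the scale/unscale pair (standalone sketch, assuming diffusers is installed; the class is instantiated directly with a small embedding size):

normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
embeds = torch.randn(2, 4)
assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)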
| 260 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a pair of train/eval `DataLoader`s for GLUE MRPC using the
    "bert-base-cased" tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
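The `train_loss` the tracker receives is the epoch's summed per-step loss divided by the number of batches. A toy version of that bookkeeping with invented losses:

step_losses = [1.0, 0.5, 0.25, 0.25]  # pretend per-batch losses for one epoch
total_loss = 0.0
for loss in step_losses:
    total_loss += loss  # mirrors total_loss += loss.detach().float()
print({"train_loss": total_loss / len(step_losses)})  # {'train_loss': 0.5}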
| 260 | 1 |
def abbr(a: str, b: str) -> bool:
    """Return True if string `a` can be abbreviated to string `b` by
    upper-casing some of its lowercase letters and deleting the rest.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
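Two further worked cases (standalone, after the definition above): uppercase letters in `a` must be matched exactly, while lowercase letters may be capitalized or deleted:

assert abbr("AbcDE", "ABDE") is True   # capitalize "b", delete "c"
assert abbr("AbcDE", "AFDE") is False  # nothing in `a` can produce the "F"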
| 358 |
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return num! recursively; lru_cache memoizes the intermediate results.

    >>> factorial(5)
    120
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
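Thanks to `lru_cache`, later calls reuse earlier results; in a fresh interpreter session, `factorial.cache_info()` (standard functools API) makes that visible:

factorial(10)
print(factorial.cache_info().hits)  # 0: every value up to 10! was computed once
factorial(12)                       # only 11! and 12! are new; 10! is a cache hit
print(factorial.cache_info().hits)  # 1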
| 5 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}


class T5TokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) T5 tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
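A hedged usage sketch (it downloads the `t5-small` checkpoint, so it needs network access):

from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")
ids = tok("translate English to German: Hello", return_tensors="pt").input_ids
# The 100 sentinel tokens <extra_id_0> ... <extra_id_99> sit at the top of the vocab
print(len(tok.get_sentinel_tokens()), ids.shape)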
| 274 |
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """

    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
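Two quick edge-case checks for the restored function (duplicates and the empty list):

assert merge_sort([5, 1, 4, 1, 5]) == [1, 1, 4, 5, 5]
assert merge_sort([]) == []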
| 310 | 0 |
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one list element in a parallel odd-even transposition sort."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
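For comparison, a serial odd-even transposition sort: the same alternating compare-exchange phases without processes or pipes (a textbook version added here for illustration, not part of the original module):

def odd_even_transposition_serial(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_serial(list(range(10, 0, -1))) == list(range(1, 11))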
| 364 |
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big-endian to little-endian word order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as 8 hex digits with the bytes in little-endian order."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message's bit string to a multiple of 512 bits and append its length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of sixteen little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by `shift`."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-character MD5 hex digest of `message` as bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
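A cross-check of the restored implementation against the standard library; the MD5 of b"hello world" is the well-known 5eb63bbbe01eeed093cb22bb8f5acdc3:

import hashlib

msg = b"hello world"
assert md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8")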
| 5 | 0 |