code string (82–54.1k chars) | code_codestyle int64 (0–699) | style_context string (111–35.6k chars) | style_context_codestyle int64 (0–699) | label int64 (0–1)
---|---|---|---|---
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : Tuple=30 , UpperCamelCase__ : List[str]=400 , UpperCamelCase__ : int=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase__ : Any=[0.5, 0.5, 0.5] , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=1 / 255 , UpperCamelCase__ : Union[str, Any]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
A = parent
A = batch_size
A = num_channels
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_normalize
A = image_mean
A = image_std
A = do_rescale
A = rescale_factor
A = do_pad
def UpperCamelCase ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=False ):
if not batched:
A = image_inputs[0]
if isinstance(UpperCamelCase__ , Image.Image ):
A , A = image.size
else:
A , A = image.shape[1], image.shape[2]
if w < h:
A = int(self.size['shortest_edge'] * h / w )
A = self.size['shortest_edge']
elif w > h:
A = self.size['shortest_edge']
A = int(self.size['shortest_edge'] * w / h )
else:
A = self.size['shortest_edge']
A = self.size['shortest_edge']
else:
A = []
for image in image_inputs:
A , A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A = max(UpperCamelCase__ , key=lambda item : item[0] )[0]
A = max(UpperCamelCase__ , key=lambda item : item[1] )[1]
return expected_height, expected_width
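# A minimal standalone sketch of the shortest-edge resize rule reproduced
# above (hypothetical helper, not part of the test suite): the shorter side
# is scaled to `shortest_edge` and the longer side keeps the aspect ratio.
def _expected_size(width, height, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge  # (h, w)
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert _expected_size(20, 40) == (36, 18)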
@require_torch
@require_vision
class _UpperCAmelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ConditionalDetrImageProcessor if is_vision_available() else None
def UpperCamelCase ( self : int ):
A = ConditionalDetrImageProcessingTester(self )
@property
def UpperCamelCase ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self : Any ):
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase__ , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase__ , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase__ , 'size' ) )
def UpperCamelCase ( self : Optional[int] ):
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , UpperCamelCase__ )
A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase__ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase__ )
def UpperCamelCase ( self : Tuple ):
pass
def UpperCamelCase ( self : List[str] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A , A = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
A = image_processing(UpperCamelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : Optional[int] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(UpperCamelCase__ , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : Optional[Any] ):
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A = image_processing(UpperCamelCase__ , return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase ( self : List[str] ):
# prepare image and target
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
A = json.loads(f.read() )
A = {'image_id': 39769, 'annotations': target}
# encode them
A = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
A = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , return_tensors='pt' )
# verify pixel values
A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase__ )
A = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify area
A = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase__ ) )
# verify boxes
A = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase__ )
A = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase__ , atol=1e-3 ) )
# verify image_id
A = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase__ ) )
# verify is_crowd
A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase__ ) )
# verify class_labels
A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase__ ) )
# verify orig_size
A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase__ ) )
# verify size
A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase__ ) )
@slow
def UpperCamelCase ( self : int ):
# prepare image, target and masks_path
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
A = json.loads(f.read() )
A = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
A = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A = ConditionalDetrImageProcessor(format='coco_panoptic' )
A = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , masks_path=UpperCamelCase__ , return_tensors='pt' )
# verify pixel values
A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase__ )
A = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify area
A = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase__ ) )
# verify boxes
A = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase__ )
A = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase__ , atol=1e-3 ) )
# verify image_id
A = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase__ ) )
# verify is_crowd
A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase__ ) )
# verify class_labels
A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase__ ) )
# verify masks
A = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , UpperCamelCase__ )
# verify orig_size
A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase__ ) )
# verify size
A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase__ ) )
| 699 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''facebook/bart-large-mnli'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
SCREAMING_SNAKE_CASE : Any = '''text_classifier'''
SCREAMING_SNAKE_CASE : Any = AutoTokenizer
SCREAMING_SNAKE_CASE : Dict = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : List[Any] = ['''text''', ['''text''']]
SCREAMING_SNAKE_CASE : Dict = ['''text''']
def UpperCamelCase ( self : List[str] ):
super().setup()
A = self.model.config
A = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
A = int(UpperCamelCase__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
A = labels
return self.pre_processor(
[text] * len(UpperCamelCase__ ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : int , UpperCamelCase__ : List[str] ):
A = outputs.logits
A = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
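# A hedged, standalone sketch of the zero-shot recipe the tool above
# implements (the standard MNLI trick; checkpoint name taken from the tool's
# own default): each label is scored by the entailment logit of the pair
# (text, "This example is {label}").
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

def zero_shot_classify(text, labels, checkpoint="facebook/bart-large-mnli"):
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
    inputs = tokenizer(
        [text] * len(labels),
        [f"This example is {label}" for label in labels],
        return_tensors="pt",
        padding=True,
    )
    logits = model(**inputs).logits
    # Assumption: the entailment class index is recorded in the model config.
    entailment_id = int(model.config.label2id.get("entailment", 2))
    return labels[int(torch.argmax(logits[:, entailment_id]))]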
| 699 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
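# The block above wires transformers' _LazyModule so heavy submodules are
# imported only on first attribute access. A minimal standalone sketch of the
# same idea using module-level __getattr__ (PEP 562); the mapping is
# illustrative, and the relative import assumes this lives in a package
# __init__.py:
import importlib

_lazy_symbols = {"PLBartTokenizer": ".tokenization_plbart"}

def __getattr__(name):
    if name in _lazy_symbols:
        module = importlib.import_module(_lazy_symbols[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")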
| 699 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase (lowerCAmelCase : List[str] ) -> Dict:
A = r'\w+[.]\d+'
A = re.findall(lowerCAmelCase, lowerCAmelCase )
for pat in pats:
A = key.replace(lowerCAmelCase, '_'.join(pat.split('.' ) ) )
return key
def __UpperCamelCase (lowerCAmelCase : Optional[int], lowerCAmelCase : Dict, lowerCAmelCase : Dict ) -> Any:
A = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
A = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCamelCase (lowerCAmelCase : Tuple, lowerCAmelCase : Any, lowerCAmelCase : str=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
A = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A = flax_model.init_weights(PRNGKey(lowerCAmelCase ) )
A = flatten_dict(lowerCAmelCase )
A = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A = rename_key(lowerCAmelCase )
A = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
A , A = rename_key_and_reshape_tensor(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
A = jnp.asarray(lowerCAmelCase )
return unflatten_dict(lowerCAmelCase )
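# A quick self-contained check of the conv-kernel relayout performed above:
# PyTorch stores Conv2d weights as (out_ch, in_ch, kH, kW) while Flax expects
# (kH, kW, in_ch, out_ch), hence transpose(2, 3, 1, 0). Shapes illustrative.
import numpy as np

pt_kernel = np.zeros((8, 3, 5, 5))             # (out, in, kH, kW)
flax_kernel = pt_kernel.transpose(2, 3, 1, 0)  # (kH, kW, in, out)
assert flax_kernel.shape == (5, 5, 3, 8)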
| 699 | 1 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = field(
metadata={'''help''': '''The output directory where the model will be written.'''} , )
SCREAMING_SNAKE_CASE : str = field(
metadata={
'''help''': (
'''The encoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train an encoder model from scratch.'''
)
} , )
SCREAMING_SNAKE_CASE : str = field(
metadata={
'''help''': (
'''The decoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train a decoder model from scratch.'''
)
} , )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} )
def __UpperCamelCase () -> int:
A = HfArgumentParser((ModelArguments,) )
((A) , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
A = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
A = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
A = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
A = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
A = True
A = True
A = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=lowerCAmelCase, decoder_config=lowerCAmelCase, )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
A = decoder_config.decoder_start_token_id
A = decoder_config.pad_token_id
if decoder_start_token_id is None:
A = decoder_config.bos_token_id
if pad_token_id is None:
A = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
A = decoder_config.eos_token_id
A = decoder_start_token_id
A = pad_token_id
A = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
A = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
A = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 699 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase__ : Collection[float] | None = None ):
if components is None:
A = []
A = list(UpperCamelCase__ )
def __len__( self : List[Any] ):
return len(self.__components )
def __str__( self : str ):
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
def __add__( self : str , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self : Dict , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Tuple , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Dict , UpperCamelCase__ : Vector ):
...
def __mul__( self : Union[str, Any] , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , (float, int) ):
A = [c * other for c in self.__components]
return Vector(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(self ) == len(UpperCamelCase__ ):
A = len(self )
A = [self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return sum(UpperCamelCase__ )
else: # error case
raise Exception('invalid operand!' )
def UpperCamelCase ( self : Union[str, Any] ):
return Vector(self.__components )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : float ):
assert -len(self.__components ) <= pos < len(self.__components )
A = value
def UpperCamelCase ( self : str ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
A = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase__ ) )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Vector , UpperCamelCase__ : bool = False ):
A = self * other
A = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
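# A standalone sketch of the angle computation above:
# angle = arccos((x . y) / (|x| * |y|)), optionally converted to degrees.
import math

x, y = [1.0, 0.0], [0.0, 1.0]
dot = sum(a * b for a, b in zip(x, y))
norms = math.sqrt(sum(a * a for a in x)) * math.sqrt(sum(b * b for b in y))
print(math.degrees(math.acos(dot / norms)))  # 90.0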
def __UpperCamelCase (lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase )
return Vector([0] * dimension )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (isinstance(lowerCAmelCase, lowerCAmelCase ))
A = [0] * dimension
A = 1
return Vector(lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : float, lowerCAmelCase : Vector, lowerCAmelCase : Vector ) -> Vector:
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and isinstance(lowerCAmelCase, lowerCAmelCase )
and (isinstance(lowerCAmelCase, (int, float) ))
)
return x * scalar + y
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
random.seed(lowerCAmelCase )
A = [random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )]
return Vector(lowerCAmelCase )
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : list[list[float]] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
A = matrix
A = w
A = h
def __str__( self : int ):
A = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Optional[Any] , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self : Dict , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : int , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Union[str, Any] , UpperCamelCase__ : Vector ):
...
def __mul__( self : Tuple , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # matrix-vector
if len(UpperCamelCase__ ) == self.__width:
A = zero_vector(self.__height )
for i in range(self.__height ):
A = [
self.__matrix[i][j] * other.component(UpperCamelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar
A = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase__ , self.__width , self.__height )
return None
def UpperCamelCase ( self : Optional[int] ):
return self.__height
def UpperCamelCase ( self : List[Any] ):
return self.__width
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('component: indices out of bounds' )
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
A = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
A = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase__ ) ):
A = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ )
else:
raise Exception('Indices out of bounds' )
def UpperCamelCase ( self : Tuple ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
A = [
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width )
]
return sum(UpperCamelCase__ )
def __UpperCamelCase (lowerCAmelCase : int ) -> Matrix:
A = [[0] * n for _ in range(lowerCAmelCase )]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Matrix:
random.seed(lowerCAmelCase )
A = [
[random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )
]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
| 699 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''facebook/bart-large-mnli'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
SCREAMING_SNAKE_CASE : Any = '''text_classifier'''
SCREAMING_SNAKE_CASE : Any = AutoTokenizer
SCREAMING_SNAKE_CASE : Dict = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : List[Any] = ['''text''', ['''text''']]
SCREAMING_SNAKE_CASE : Dict = ['''text''']
def UpperCamelCase ( self : List[str] ):
super().setup()
A = self.model.config
A = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
A = int(UpperCamelCase__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
A = labels
return self.pre_processor(
[text] * len(UpperCamelCase__ ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : int , UpperCamelCase__ : List[str] ):
A = outputs.logits
A = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 699 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any]=50265 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : int=8 , UpperCamelCase__ : Optional[int]=2048 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=8 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : int=16 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Any=512 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Dict=2 , **UpperCamelCase__ : List[str] , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
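# For reference, each past_key_values entry built above is a (key, value)
# pair shaped (batch, num_heads, past_seq_len, hidden_size // num_heads); a
# standalone shape check with illustrative dimensions:
import torch

batch, heads, past_len, hidden = 2, 16, 10, 512
key = torch.zeros(batch, heads, past_len, hidden // heads)
assert key.shape == (2, 16, 10, 32)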
| 699 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '''cvt'''
def __init__( self : Dict , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : int=[7, 3, 3] , UpperCamelCase__ : int=[4, 2, 2] , UpperCamelCase__ : Union[str, Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Any=[1, 3, 6] , UpperCamelCase__ : int=[1, 2, 10] , UpperCamelCase__ : int=[4.0, 4.0, 4.0] , UpperCamelCase__ : List[str]=[0.0, 0.0, 0.0] , UpperCamelCase__ : Union[str, Any]=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : List[str]=[False, False, True] , UpperCamelCase__ : Dict=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : Tuple=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[Any]=[2, 2, 2] , UpperCamelCase__ : int=[1, 1, 1] , UpperCamelCase__ : Dict=[1, 1, 1] , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Any=1e-1_2 , **UpperCamelCase__ : List[Any] , ):
super().__init__(**UpperCamelCase__ )
A = num_channels
A = patch_sizes
A = patch_stride
A = patch_padding
A = embed_dim
A = num_heads
A = depth
A = mlp_ratio
A = attention_drop_rate
A = drop_rate
A = drop_path_rate
A = qkv_bias
A = cls_token
A = qkv_projection_method
A = kernel_qkv
A = padding_kv
A = stride_kv
A = padding_q
A = stride_q
A = initializer_range
A = layer_norm_eps
| 699 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE : List[str] = '''BridgeTowerImageProcessor'''
SCREAMING_SNAKE_CASE : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
A = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
A = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def UpperCamelCase ( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Any ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
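# A hedged usage sketch for a combined processor like the one above (the
# checkpoint name is an assumption): text goes through the tokenizer, images
# through the image processor, and the two encodings merge into one batch.
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
image = Image.new("RGB", (224, 224))
encoding = processor(images=image, text="a photo of a cat", return_tensors="pt")
print(sorted(encoding.keys()))  # input_ids, attention_mask, pixel_values, ...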
| 699 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : int=13 , UpperCamelCase__ : List[Any]=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=99 , UpperCamelCase__ : str=32 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : Optional[Any]=37 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : int=512 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Any=False , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Tuple="None" , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : Tuple=None , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = relative_attention
A = position_biased_input
A = pos_att_type
A = scope
def UpperCamelCase ( self : str ):
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCamelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
A = TFDebertaVaModel(config=UpperCamelCase__ )
A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A = [input_ids, input_mask]
A = model(UpperCamelCase__ )
A = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
A = TFDebertaVaForMaskedLM(config=UpperCamelCase__ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
A = self.num_labels
A = TFDebertaVaForSequenceClassification(config=UpperCamelCase__ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ):
A = self.num_labels
A = TFDebertaVaForTokenClassification(config=UpperCamelCase__ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
A = TFDebertaVaForQuestionAnswering(config=UpperCamelCase__ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[Any] ):
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE : Any = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : List[str] = False
def UpperCamelCase ( self : Optional[Any] ):
A = TFDebertaVaModelTester(self )
A = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def UpperCamelCase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCamelCase ( self : List[str] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def UpperCamelCase ( self : Any ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def UpperCamelCase ( self : Union[str, Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def UpperCamelCase ( self : Tuple ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def UpperCamelCase ( self : Dict ):
A = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(UpperCamelCase__ )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def UpperCamelCase ( self : Optional[Any] ):
pass
@slow
def UpperCamelCase ( self : Dict ):
A = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
A = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
A = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
A = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
A = tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1e-4 )
| 699 |
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> str:
return "\n".join(
f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
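# A readable restatement of the snippet above with descriptive names (a
# sketch, not part of the sample), plus a deterministic output check:
def multiplication_table(number, number_of_terms):
    return "\n".join(f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1))

assert multiplication_table(5, 3) == "5 * 1 = 5\n5 * 2 = 10\n5 * 3 = 15"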
| 699 | 1 |
def __UpperCamelCase (lowerCAmelCase : int = 1_000 ) -> int:
A , A = 1, 1
A = 2
while True:
A = 0
A = fa + fa
A , A = fa, f
index += 1
for _ in str(lowerCAmelCase ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
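# An equivalent, readable sketch of the digit-count loop above (Project Euler
# 25 style): return the index of the first Fibonacci number with n digits.
# For n=1000 this matches the snippet's answer.
def first_fib_with_n_digits(n=1000):
    f1, f2, index = 1, 1, 2
    while len(str(f2)) < n:
        f1, f2 = f2, f1 + f2
        index += 1
    return index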
| 699 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _UpperCAmelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , UpperCamelCase__ : int = 128 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : float = 2_000.0 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 2048 , UpperCamelCase__ : float = 0.1 , ):
super().__init__()
A = nn.Sequential(
nn.Linear(UpperCamelCase__ , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , )
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = False
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
# FiLM conditional T5 decoder
A = DecoderLayer(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
self.decoders.append(UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
    def encoder_decoder_mask( self : Tuple , query_input : Any , key_input : Any ):
        # Outer product of the two 1-D masks gives (batch, query_len, key_len);
        # unsqueeze(-3) adds a broadcastable attention-head axis.
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward( self : Dict , encodings_and_masks : Union[str, Any] , decoder_input_tokens : Any , decoder_noise_time : Optional[int] ):
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
class DecoderLayer( nn.Module ):
'''simple docstring'''
    def __init__( self : Union[str, Any] , d_model : int , d_kv : int , num_heads : int , d_ff : int , dropout_rate : float , layer_norm_epsilon : float=1e-6 ):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward( self : Dict , hidden_states : Any , conditioning_emb : Optional[Any]=None , attention_mask : Optional[int]=None , encoder_hidden_states : Any=None , encoder_attention_mask : List[Any]=None , encoder_decoder_position_bias : List[Any]=None , ):
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
'''simple docstring'''
    def __init__( self : Tuple , d_model : int , d_kv : int , num_heads : int , dropout_rate : float ):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self : int , hidden_states : Any , conditioning_emb : Optional[Any]=None , attention_mask : Tuple=None , ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class TaLayerCrossAttention( nn.Module ):
'''simple docstring'''
    def __init__( self : List[str] , d_model : int , d_kv : int , num_heads : int , dropout_rate : float , layer_norm_epsilon : float ):
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self : Optional[Any] , hidden_states : Dict , key_value_states : List[Any]=None , attention_mask : List[str]=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond( nn.Module ):
'''simple docstring'''
    def __init__( self : Any , d_model : int , d_ff : int , dropout_rate : float , layer_norm_epsilon : float ):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self : int , hidden_states : Union[str, Any] , conditioning_emb : Any=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense( nn.Module ):
'''simple docstring'''
    def __init__( self : Any , d_model : int , d_ff : int , dropout_rate : float ):
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self : Optional[Any] , hidden_states : List[Any] ):
        # Gated-GELU: one projection passes through GELU and gates the other.
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm( nn.Module ):
'''simple docstring'''
    def __init__( self : int , hidden_size : int , eps : float=1e-6 ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self : Optional[int] , hidden_states : Any ):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
'''simple docstring'''
    def forward( self : Any , input : torch.Tensor ):
        # Tanh approximation of GELU, as used by T5/GPT-2.
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer( nn.Module ):
'''simple docstring'''
    def __init__( self : Dict , in_features : int , out_features : int ):
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self : Tuple , x : Union[str, Any] , conditioning_emb : List[Any] ):
        # Predict a per-feature scale and shift from the conditioning embedding (FiLM).
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
return x
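# Minimal FiLM smoke test (illustrative sketch, not part of the original module;
# shapes assume d_model = 512 with the 4x-wide conditioning embedding built above):
#
#   film = TaFiLMLayer(in_features=512 * 4, out_features=512)
#   x = torch.randn(1, 16, 512)         # (batch, sequence, d_model)
#   cond = torch.randn(1, 1, 512 * 4)   # pooled conditioning embedding
#   assert film(x, cond).shape == x.shape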
| 699 | 1 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item : str, main_target : str ) -> tuple[str, float]:
    # Fitness is the number of characters that already match the target.
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover(parent_1 : str, parent_2 : str ) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1 ) - 1 )
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child : str, genes : list[str] ) -> str:
    child_list = list(child )
    if random.uniform(0, 1 ) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child ) ) - 1] = random.choice(genes )
    return "".join(child_list )
def select(parent_1 : tuple[str, float], population_score : list[tuple[str, float]], genes : list[str], ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100 ) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n ):
        parent_2 = population_score[random.randint(0, N_SELECTED )][0]
        child_1 , child_2 = crossover(parent_1[0], parent_2 )
        # Append new string to the population list.
        pop.append(mutate(child_1, genes ) )
        pop.append(mutate(child_2, genes ) )
    return pop
def basic(target : str, genes : list[str], debug : bool = True ) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(msg )
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(msg )
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append(''.join([random.choice(genes ) for i in range(len(target ) )] ) )
    # Just some logs to know what the algorithm is doing.
    generation , total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target ) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x : x[1], reverse=True )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'''\nGeneration: {generation}'''
                f'''\nTotal Population:{total_population}'''
                f'''\nBest score: {population_score[0][1]}'''
                f'''\nBest string: {population_score[0][0]}''' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )], population_score, genes ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population ) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation , population , target = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
| 699 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
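# e.g. bin(0b101)[2:] == "101" -> [1, 0, 1]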
class _UpperCAmelCase :
'''simple docstring'''
    def __init__( self : Union[str, Any] ):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('bits' , self.watermark )
    def apply_watermark( self : Optional[int] , images : torch.FloatTensor ):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
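# Illustrative usage sketch (added for clarity, not part of the original file).
# Inputs are expected in [-1, 1] with shape (batch, channels, height, width):
#
#   watermarker = _UpperCAmelCase()
#   images = torch.rand(1, 3, 512, 512) * 2 - 1
#   watermarked = watermarker.apply_watermark(images)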
| 699 | 1 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ):  # picklable for multiprocessing
    return x.sum()
def add_one(i ):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    '''simple docstring'''
    x : int
    y : str
class _UpperCAmelCase ( TestCase ):
'''simple docstring'''
    def UpperCamelCase ( self : Dict ):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {'a': 1, 'b': 2}
        s6 = {'a': [1, 2], 'b': [3, 4]}
        s7 = {'a': {'1': 1}, 'b': 2}
        s8 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {'a': 2, 'b': 3}
        expected_map_nested_s6 = {'a': [2, 3], 'b': [4, 5]}
        expected_map_nested_s7 = {'a': {'1': 2}, 'b': 3}
        expected_map_nested_s8 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
        self.assertEqual(map_nested(add_one , s1 ) , expected_map_nested_s1 )
        self.assertEqual(map_nested(add_one , s2 ) , expected_map_nested_s2 )
        self.assertEqual(map_nested(add_one , s3 ) , expected_map_nested_s3 )
        self.assertEqual(map_nested(add_one , s4 ) , expected_map_nested_s4 )
        self.assertEqual(map_nested(add_one , s5 ) , expected_map_nested_s5 )
        self.assertEqual(map_nested(add_one , s6 ) , expected_map_nested_s6 )
        self.assertEqual(map_nested(add_one , s7 ) , expected_map_nested_s7 )
        self.assertEqual(map_nested(add_one , s8 ) , expected_map_nested_s8 )
        num_proc = 2
        self.assertEqual(map_nested(add_one , s1 , num_proc=num_proc ) , expected_map_nested_s1 )
        self.assertEqual(map_nested(add_one , s2 , num_proc=num_proc ) , expected_map_nested_s2 )
        self.assertEqual(map_nested(add_one , s3 , num_proc=num_proc ) , expected_map_nested_s3 )
        self.assertEqual(map_nested(add_one , s4 , num_proc=num_proc ) , expected_map_nested_s4 )
        self.assertEqual(map_nested(add_one , s5 , num_proc=num_proc ) , expected_map_nested_s5 )
        self.assertEqual(map_nested(add_one , s6 , num_proc=num_proc ) , expected_map_nested_s6 )
        self.assertEqual(map_nested(add_one , s7 , num_proc=num_proc ) , expected_map_nested_s7 )
        self.assertEqual(map_nested(add_one , s8 , num_proc=num_proc ) , expected_map_nested_s8 )
        sn = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
        expected_map_nested_sn_sum = {'a': 2, 'b': 0, 'c': 2}
        expected_map_nested_sn_int = {
            'a': np.eye(2 ).astype(int ),
            'b': np.zeros(3 ).astype(int ),
            'c': np.ones(2 ).astype(int ),
        }
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int , sn , map_numpy=True ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False , num_proc=num_proc ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int , sn , map_numpy=True , num_proc=num_proc ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        with self.assertRaises(AttributeError ):  # can't pickle a local lambda
            map_nested(lambda x : x + 1 , sn , num_proc=num_proc )
def UpperCamelCase ( self : Any ):
        d1 = {'a': 1, 'b': 2}
        d2 = {'a': 3, 'b': 4}
        d3 = {'a': 5, 'b': 6}
        expected_zip_dict_result = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1 , d2 , d3 ) ) , expected_zip_dict_result )
def UpperCamelCase ( self : int ):
        class Foo:
            '''simple docstring'''
            my_attr = '''bar'''
        foo = Foo()
        self.assertEqual(foo.my_attr , 'bar' )
        with temporary_assignment(foo , 'my_attr' , 'BAR' ):
            self.assertEqual(foo.my_attr , 'BAR' )
        self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def __UpperCamelCase (iterable_length : int, num_proc : int, expected_num_proc : int ) -> Union[str, Any]:
    with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
        'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
        data_struct = {f'''{i}''': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _UpperCAmelCase ( TestCase ):
'''simple docstring'''
@require_tf
def UpperCamelCase ( self : List[Any] ):
import tensorflow as tf
from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def UpperCamelCase ( self : int ):
import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def UpperCamelCase ( self : Tuple ):
def gen_random_output():
return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize('input_data', [{}] )
def __UpperCamelCase (input_data ) -> Any:
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output', [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
], )
def __UpperCamelCase (data, expected_output ) -> None:
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def __UpperCamelCase () -> Optional[Any]:
    input = A(x=1, y='foobar' )
    expected_output = {'x': 1, 'y': 'foobar'}
    assert asdict(input ) == expected_output
    input = {'a': {'b': A(x=10, y='foo' )}, 'c': [A(x=20, y='bar' )]}
    expected_output = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10, y='foo' )] )
def _split_text(text : str ):
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def __UpperCamelCase () -> Dict:
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
        assert out.count('hello' ) == 10
        assert out.count('there' ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
        assert out.count('hello' ) == 10
        assert out.count('there' ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count('a' ) == 2
        assert out.count('b' ) == 2
        assert len(out ) == 4
| 699 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type ) -> None:
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model ) -> None:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*', layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm ) -> None:
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ) -> None:
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
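# Example invocation (illustrative; the script name and checkpoint paths are placeholders):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf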
| 699 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
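# The _LazyModule above defers the heavy torch imports until an attribute such as
# IBertModel is first accessed, which keeps importing the package cheap.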
| 699 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase = TypeVar("T")
class Node( Generic[T] ):
'''simple docstring'''
    def __init__( self : Tuple , data : T ):
        self.data = data
        self.next = None
def __str__( self : Optional[int] ):
return f'''{self.data}'''
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
    def __init__( self : Tuple ):
        self.top = None
    def __iter__( self : int ):
        node = self.top
        while node:
            yield node.data
            node = node.next
def __str__( self : Any ):
return "->".join([str(UpperCamelCase__ ) for item in self] )
def __len__( self : Dict ):
return len(tuple(iter(self ) ) )
    def is_empty( self : List[str] ):
        return self.top is None
    def push( self : Dict , item : T ):
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self : Dict ):
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self : List[str] ):
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data
    def clear( self : List[str] ):
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
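# Quick usage sketch (illustrative; the doctest runner above has nothing to collect
# because the docstrings are placeholders):
#
#   stack = _UpperCAmelCase[int]()
#   stack.push(1)
#   stack.push(2)
#   assert stack.peek() == 2 and stack.pop() == 2 and len(stack) == 1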
| 699 | 1 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _UpperCAmelCase ( AbstractFileSystem ):
'''simple docstring'''
    root_marker = ''''''
    protocol = '''hf-legacy'''  # "hf://"" is reserved for hffs
    def __init__( self : List[str] , repo_info : Optional[DatasetInfo] = None , token : Optional[str] = None , **kwargs : Dict , ):
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self : str ):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    'name': hf_file.rfilename,
                    'size': None,
                    'type': 'file',
                }
                self.dir_cache.update(
                    {
                        str(d ): {'name': str(d ), 'size': None, 'type': 'directory'}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open( self : int , path : str , mode : str = "rb" , **kwargs : int , ):
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
    def info( self : int , path : str , **kwargs : List[Any] ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self : Dict , path : Union[str, Any] , detail : Any=False , **kwargs : int ):
        self._get_dirs()
        path = PurePosixPath(path.strip('/' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('/' ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['name'] for f in out )
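# Illustrative usage sketch (assumption, not part of the original file): given a
# DatasetInfo for a hub dataset, files can be listed and streamed without cloning:
#
#   fs = _UpperCAmelCase(repo_info=dataset_info, token=token)
#   fs.ls('')                          # top-level entries
#   with fs.open('train.csv') as f:    # fsspec routes this through _open()
#       header = f.readline()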
| 699 |
from __future__ import annotations
import math
def minimax(depth : int, node_index : int, is_max : bool, scores : list[int], height : float ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if not scores:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height ), minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height ), minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores ), 2 )
    print(f'''Optimal value : {minimax(0, 0, True, scores, height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
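# Worked example (illustrative): with scores [3, 5, 2, 9] and height log2(4) = 2,
# the root maximizer sees max(min(3, 5), min(2, 9)) = max(3, 2) = 3.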
| 699 | 1 |
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=DummyObject ):
'''simple docstring'''
    _backends = ['''torch''', '''scipy''']
    def __init__( self : int , *args : str , **kwargs : Optional[int] ):
        requires_backends(self , ['torch', 'scipy'] )
    @classmethod
    def from_config( cls : Tuple , *args : Union[str, Any] , **kwargs : Optional[int] ):
        requires_backends(cls , ['torch', 'scipy'] )
    @classmethod
    def from_pretrained( cls : Any , *args : Any , **kwargs : Optional[int] ):
        requires_backends(cls , ['torch', 'scipy'] )
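# Behavior sketch (illustrative): when torch or scipy is missing, instantiating the
# dummy or calling from_config/from_pretrained raises a helpful error with install
# instructions instead of an opaque ImportError at import time:
#
#   obj = _UpperCAmelCase()   # -> requires_backends raises if a backend is absent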
| 699 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file : str, eval_file : str, test_file : str, tokenizer : PreTrainedTokenizer, label_column_id : int, max_seq_length : Optional[int] = None, ) -> Dict:
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv', data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    labelaid = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length' ), batched=True, )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding='max_length', ), batched=True, )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, labelaid
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''
    label_column_id : int = field(metadata={'''help''': '''Which column contains the label'''} )
    train_file : str = field(default=None , metadata={'''help''': '''The path of the training file'''} )
    dev_file : Optional[str] = field(default=None , metadata={'''help''': '''The path of the development file'''} )
    test_file : Optional[str] = field(default=None , metadata={'''help''': '''The path of the test file'''} )
    max_seq_length : int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class ModelArguments:
    '''simple docstring'''
    model_name_or_path : str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast : bool = field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir : Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def __UpperCamelCase () -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset , eval_dataset , test_ds , labelaid = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(labelaid ), label2id=labelaid, id2label={id: label for label, id in labelaid.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions, axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt' )
        with open(output_eval_file, 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key, value in result.items():
                logger.info(f''' {key} = {value}''' )
                writer.write(f'''{key} = {value}\n''' )
        results.update(result )
return results
if __name__ == "__main__":
main()
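# Example invocation (illustrative; the script name and file names are placeholders):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --label_column_id 0 \
#       --train_file train.csv --dev_file dev.csv \
#       --output_dir ./model --do_train --do_eval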
| 699 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 699 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 699 | 1 |
from __future__ import annotations
def p_series(nth_term : int | float | str, power : int | float | str ) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series : list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(f'''1 / {pow(temp + 1, int(power ) )}''' if series else '1' )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = int(input("Enter the last number (nth term) of the P-Series"))
_UpperCAmelCase = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
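# Quick check: p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']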
| 699 |
def actual_power(a : int, b : int ) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2 ) ) * actual_power(a, int(b / 2 ) )
    else:
        return a * actual_power(a, int(b / 2 ) ) * actual_power(a, int(b / 2 ) )
def power(a : int, b : int ) -> float:
    if b < 0:
        return 1 / actual_power(a, b )
    return actual_power(a, b )
if __name__ == "__main__":
print(power(-2, -3))
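# Quick check: power(-2, -3) == 1 / (-2) ** 3 == -0.125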
| 699 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def get_dummy_inputs( self : Tuple , seed : Any=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 3,
            'strength': 0.75,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def UpperCamelCase ( self : Optional[Any] ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def UpperCamelCase ( self : int ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def UpperCamelCase ( self : Optional[int] ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def UpperCamelCase ( self : Tuple ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def UpperCamelCase ( self : str ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def UpperCamelCase ( self : Tuple ):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase ( self : int ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self : Optional[int] ):
A = ort.SessionOptions()
A = False
return options
def UpperCamelCase ( self : int ):
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type='np' , )
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCamelCase ( self : Any ):
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCamelCase__ , output_type='np' , )
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
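# Illustrative sketch (not part of the test suite above): assuming the
# obfuscated OnnxStableDiffusionImgaImgPipeline corresponds to diffusers'
# OnnxStableDiffusionImg2ImgPipeline, this is the minimal CPU img2img call
# the nightly test exercises; the prompt and step count are arbitrary choices.
import numpy as np
from diffusers import OnnxStableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

def img2img_cpu_sketch():
    init_image = load_image(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
        '/img2img/sketch-mountains-input.jpg'
    ).resize((768, 512))
    pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
        'CompVis/stable-diffusion-v1-4', revision='onnx', provider='CPUExecutionProvider'
    )
    rng = np.random.RandomState(0)  # ONNX pipelines take numpy RNGs, not torch generators
    output = pipe(
        prompt='A fantasy landscape, trending on artstation',
        image=init_image,
        strength=0.75,
        guidance_scale=7.5,
        num_inference_steps=10,
        generator=rng,
        output_type='np',
    )
    return output.images  # float array of shape (1, 512, 768, 3)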
def __UpperCamelCase (lowerCAmelCase : list[int] ) -> int:
if not numbers:
return 0
if not isinstance(lowerCAmelCase, (list, tuple) ) or not all(
isinstance(lowerCAmelCase, lowerCAmelCase ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
A = A = A = numbers[0]
for i in range(1, len(lowerCAmelCase ) ):
# update the maximum and minimum subarray products
A = numbers[i]
if number < 0:
A , A = min_till_now, max_till_now
A = max(lowerCAmelCase, max_till_now * number )
A = min(lowerCAmelCase, min_till_now * number )
# update the maximum product found till now
A = max(lowerCAmelCase, lowerCAmelCase )
return max_prod
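# A de-obfuscated reference sketch of the routine above, with readable names;
# it tracks both the running minimum and maximum products so that negative
# factors are handled (a Kadane-style max-product scan).
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(isinstance(n, int) for n in numbers):
        raise ValueError('numbers must be an iterable of integers')
    max_till_now = min_till_now = max_prod = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            # a negative factor swaps the roles of the running min and max
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        max_prod = max(max_prod, max_till_now)
    return max_prod
# e.g. max_product_subarray([2, 3, -2, 4]) == 6 and max_product_subarray([-2, 0, -1]) == 0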
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_UpperCAmelCase = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_UpperCAmelCase = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_UpperCAmelCase = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : Optional[Any] ) -> Optional[int]:
return float((preds == labels).mean() )
def __UpperCamelCase (lowerCAmelCase : Dict, lowerCAmelCase : Dict ) -> int:
A = simple_accuracy(lowerCAmelCase, lowerCAmelCase )
A = float(fa_score(y_true=lowerCAmelCase, y_pred=lowerCAmelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCamelCase (lowerCAmelCase : Optional[Any], lowerCAmelCase : Union[str, Any] ) -> str:
A = float(pearsonr(lowerCAmelCase, lowerCAmelCase )[0] )
A = float(spearmanr(lowerCAmelCase, lowerCAmelCase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase ( self : List[Any] ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Any ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(UpperCamelCase__ , UpperCamelCase__ )}
elif self.config_name == "stsb":
return pearson_and_spearman(UpperCamelCase__ , UpperCamelCase__ )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(UpperCamelCase__ , UpperCamelCase__ )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase (lowerCAmelCase : Tuple ) -> Optional[int]:
print('Loading config file...' )
def flatten_yaml_as_dict(lowerCAmelCase : List[Any], lowerCAmelCase : Any="", lowerCAmelCase : Optional[int]="." ):
A = []
for k, v in d.items():
A = parent_key + sep + k if parent_key else k
if isinstance(lowerCAmelCase, collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(lowerCAmelCase, lowerCAmelCase, sep=lowerCAmelCase ).items() )
else:
items.append((new_key, v) )
return dict(lowerCAmelCase )
A = argparse.Namespace()
with open(lowerCAmelCase, 'r' ) as yaml_file:
try:
A = yaml.load(lowerCAmelCase, Loader=yaml.FullLoader )
A = flatten_yaml_as_dict(lowerCAmelCase )
for k, v in flat_cfg.items():
setattr(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(lowerCAmelCase, str(lowerCAmelCase ) ) )
return config
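# A standalone sketch of the nested-dict flattening used inside
# load_orig_config_file above: nested mappings collapse into dotted keys so
# each leaf can later be set as an attribute on an argparse.Namespace.
import collections.abc

def flatten_as_dict(d, parent_key='', sep='.'):
    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten_as_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
# flatten_as_dict({'model': {'classification': {'name': 'mobilevit_v2'}}})
# -> {'model.classification.name': 'mobilevit_v2'}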
def __UpperCamelCase (lowerCAmelCase : Any, lowerCAmelCase : List[str] ) -> Optional[int]:
A = MobileViTVaConfig()
A = False
# dataset
if task_name.startswith('imagenet1k_' ):
A = 1_000
if int(task_name.strip().split('_' )[-1] ) == 384:
A = 384
else:
A = 256
A = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
A = 21_000
if int(task_name.strip().split('_' )[-1] ) == 384:
A = 384
else:
A = 256
A = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
A = 151
A = 512
A = 'ade20k-id2label.json'
A = True
elif task_name.startswith('voc_' ):
A = 21
A = 512
A = 'pascal-voc-id2label.json'
A = True
# orig_config
A = load_orig_config_file(lowerCAmelCase )
assert getattr(lowerCAmelCase, 'model.classification.name', -1 ) == "mobilevit_v2", "Invalid model"
A = getattr(lowerCAmelCase, 'model.classification.mitv2.width_multiplier', 1.0 )
assert (
getattr(lowerCAmelCase, 'model.classification.mitv2.attn_norm_layer', -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
A = getattr(lowerCAmelCase, 'model.classification.activation.name', 'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
A = getattr(lowerCAmelCase, 'model.segmentation.output_stride', 16 )
if "_deeplabv3" in task_name:
A = getattr(lowerCAmelCase, 'model.segmentation.deeplabv3.aspp_rates', [12, 24, 36] )
A = getattr(lowerCAmelCase, 'model.segmentation.deeplabv3.aspp_out_channels', 512 )
A = getattr(lowerCAmelCase, 'model.segmentation.deeplabv3.aspp_dropout', 0.1 )
# id2label
A = 'huggingface/label-files'
A = json.load(open(hf_hub_download(lowerCAmelCase, lowerCAmelCase, repo_type='dataset' ), 'r' ) )
A = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
return config
def __UpperCamelCase (lowerCAmelCase : Optional[Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : Dict ) -> List[Any]:
A = dct.pop(lowerCAmelCase )
A = val
def __UpperCamelCase (lowerCAmelCase : Tuple, lowerCAmelCase : Optional[Any]=False ) -> Optional[Any]:
if base_model:
A = ''
else:
A = 'mobilevitv2.'
A = []
for k in state_dict.keys():
if k[:8] == "encoder.":
A = k[8:]
else:
A = k
if ".block." in k:
A = k_new.replace('.block.', '.' )
if ".conv." in k:
A = k_new.replace('.conv.', '.convolution.' )
if ".norm." in k:
A = k_new.replace('.norm.', '.normalization.' )
if "conv_1." in k:
A = k_new.replace('conv_1.', f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
A = k_new.replace(f'''layer_{i}.''', f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
A = k_new.replace('.exp_1x1.', '.expand_1x1.' )
if ".red_1x1." in k:
A = k_new.replace('.red_1x1.', '.reduce_1x1.' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
A = k_new.replace(f'''layer_{i}.0.''', f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
A = k_new.replace(f'''layer_{i}.1.local_rep.0.''', f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
A = k_new.replace(f'''layer_{i}.1.local_rep.1.''', f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
A = [0, 1]
elif i == 4:
A = [0, 1, 2, 3]
elif i == 5:
A = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
A = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''', f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
A = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''', f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
A = k_new.replace(f'''layer_{i}.1.conv_proj.''', f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
A = k_new.replace('pre_norm_attn.0.', 'layernorm_before.' )
if "pre_norm_attn.1." in k:
A = k_new.replace('pre_norm_attn.1.', 'attention.' )
if "pre_norm_ffn.0." in k:
A = k_new.replace('pre_norm_ffn.0.', 'layernorm_after.' )
if "pre_norm_ffn.1." in k:
A = k_new.replace('pre_norm_ffn.1.', 'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
A = k_new.replace('pre_norm_ffn.3.', 'ffn.conv2.' )
if "classifier.1." in k:
A = k_new.replace('classifier.1.', 'classifier.' )
if "seg_head." in k:
A = k_new.replace('seg_head.', 'segmentation_head.' )
if ".aspp_layer." in k:
A = k_new.replace('.aspp_layer.', '.' )
if ".aspp_pool." in k:
A = k_new.replace('.aspp_pool.', '.' )
rename_keys.append((k, k_new) )
return rename_keys
def __UpperCamelCase (lowerCAmelCase : Any ) -> Dict:
A = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(lowerCAmelCase )
for k in keys_to_ignore:
state_dict.pop(lowerCAmelCase, lowerCAmelCase )
def __UpperCamelCase () -> Dict:
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
A = Image.open(requests.get(lowerCAmelCase, stream=lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __UpperCamelCase (lowerCAmelCase : Dict, lowerCAmelCase : Dict, lowerCAmelCase : Any, lowerCAmelCase : List[Any] ) -> Dict:
A = get_mobilevitva_config(lowerCAmelCase, lowerCAmelCase )
# load original state_dict
A = torch.load(lowerCAmelCase, map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
A = MobileViTVaForSemanticSegmentation(lowerCAmelCase ).eval()
A = False
else:
A = MobileViTVaForImageClassification(lowerCAmelCase ).eval()
A = False
    # remove unused keys from, and rename some keys of, the original state dict
A = checkpoint
remove_unused_keys(lowerCAmelCase )
A = create_rename_keys(lowerCAmelCase, base_model=lowerCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
# load modified state_dict
model.load_state_dict(lowerCAmelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
A = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32 )
A = image_processor(images=prepare_img(), return_tensors='pt' )
A = model(**lowerCAmelCase )
# verify classification model
if task_name.startswith('imagenet' ):
A = outputs.logits
A = logits.argmax(-1 ).item()
print('Predicted class:', model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
A = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
assert torch.allclose(logits[0, :3], lowerCAmelCase, atol=1E-4 )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_UpperCAmelCase = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
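# Example invocation of the converter above (the script filename and all
# paths are placeholders):
#
#   python convert_mobilevitv2_to_hf.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path /path/to/mobilevitv2-1.0.pt \
#       --orig_config_path /path/to/mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256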
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''biogpt'''
def __init__( self : Optional[Any] , UpperCamelCase__ : str=42384 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Dict=1e-1_2 , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Optional[Any]=2 , **UpperCamelCase__ : List[Any] , ):
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = scale_embedding
A = use_cache
A = layerdrop
A = activation_dropout
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
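# Illustrative usage sketch, assuming the config class above mirrors
# transformers' BioGptConfig (same 'biogpt' model_type and defaults); the
# override values here are arbitrary:
from transformers import BioGptConfig, BioGptModel

config = BioGptConfig(num_hidden_layers=6, hidden_size=256, num_attention_heads=8)
model = BioGptModel(config)  # randomly initialised BioGPT with that geometry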
import random
from .binary_exp_mod import bin_exp_mod
def __UpperCamelCase (lowerCAmelCase : Union[str, Any], lowerCAmelCase : Optional[int]=1_000 ) -> List[Any]:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
A = n - 1
A = 0
while d % 2 == 0:
        d //= 2  # floor division keeps d an integer
        exp += 1
    # n - 1 == d * (2**exp), with d odd
A = 0
while count < prec:
A = random.randint(2, n - 1 )
A = bin_exp_mod(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
if b != 1:
A = True
for _ in range(lowerCAmelCase ):
if b == n - 1:
A = False
break
A = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
_UpperCAmelCase = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
import sys
def __UpperCamelCase (lowerCAmelCase : Dict ) -> Dict:
A = len(lowerCAmelCase )
A = [[0 for x in range(lowerCAmelCase )] for x in range(lowerCAmelCase )]
A = [[0 for x in range(lowerCAmelCase )] for x in range(lowerCAmelCase )]
for chain_length in range(2, lowerCAmelCase ):
for a in range(1, n - chain_length + 1 ):
A = a + chain_length - 1
A = sys.maxsize
for c in range(lowerCAmelCase, lowerCAmelCase ):
A = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
A = cost
A = c
return matrix, sol
def __UpperCamelCase (lowerCAmelCase : Optional[Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : Union[str, Any] ) -> List[str]:
if i == j:
print('A' + str(lowerCAmelCase ), end=' ' )
else:
print('(', end=' ' )
        print_optimal_solution(lowerCAmelCase, lowerCAmelCase, optimal_solution[i][j] )
        print_optimal_solution(lowerCAmelCase, optimal_solution[i][j] + 1, lowerCAmelCase )
print(')', end=' ' )
def __UpperCamelCase () -> List[str]:
A = [30, 35, 15, 5, 10, 20, 25]
A = len(lowerCAmelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
A , A = matrix_chain_order(lowerCAmelCase )
    print('No. of operations required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(lowerCAmelCase, 1, n - 1 )
if __name__ == "__main__":
main()
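# A compact de-obfuscated version of the DP above (same 1-based indexing
# over the dims array); for [30, 35, 15, 5, 10, 20, 25] the minimum cost is
# 15125 scalar multiplications, the classic CLRS example.
import sys

def matrix_chain_min_cost(dims: list[int]) -> int:
    n = len(dims)
    cost = [[0] * n for _ in range(n)]
    for length in range(2, n):
        for a in range(1, n - length + 1):
            b = a + length - 1
            cost[a][b] = sys.maxsize
            for c in range(a, b):
                cost[a][b] = min(
                    cost[a][b],
                    cost[a][c] + cost[c + 1][b] + dims[a - 1] * dims[c] * dims[b],
                )
    return cost[1][n - 1]
# matrix_chain_min_cost([30, 35, 15, 5, 10, 20, 25]) == 15125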
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ['''pixel_values''']
def __init__( self : Tuple , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[Any] , ):
super().__init__(**UpperCamelCase__ )
A = size if size is not None else {'shortest_edge': 224}
A = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A = image_std if image_std is not None else OPENAI_CLIP_STD
A = do_convert_rgb
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[Any] , ):
A = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
A = get_resize_output_image_size(UpperCamelCase__ , size=size['shortest_edge'] , default_to_square=UpperCamelCase__ )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ):
A = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(UpperCamelCase__ , size=(size['height'], size['width']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any , ):
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Tuple , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(UpperCamelCase__ , param_name='size' , default_to_square=UpperCamelCase__ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(UpperCamelCase__ , param_name='crop_size' , default_to_square=UpperCamelCase__ )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A = [convert_to_rgb(UpperCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
A = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
A = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
A = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
A = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
A = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
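# Illustrative usage, assuming the processor above matches transformers'
# CLIPImageProcessor (same defaults: 224 shortest edge, 224 center crop,
# OpenAI CLIP mean/std); the input image is a synthetic placeholder:
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()
inputs = processor(images=Image.new('RGB', (640, 480)), return_tensors='pt')
print(inputs['pixel_values'].shape)  # torch.Size([1, 3, 224, 224])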
from math import isqrt
def __UpperCamelCase (lowerCAmelCase : int ) -> bool:
return all(number % divisor != 0 for divisor in range(2, isqrt(lowerCAmelCase ) + 1 ) )
def __UpperCamelCase (lowerCAmelCase : int = 10**6 ) -> int:
A = 0
A = 1
A = 7
while prime_candidate < max_prime:
primes_count += is_prime(lowerCAmelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
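# Why the loop above works: it enumerates the differences of consecutive
# cubes, (k + 1)**3 - k**3 == 3*k*k + 3*k + 1 (7, 19, 37, ...), and the gap
# between successive differences is 6*(k + 1), hence cube_index is bumped
# before adding 6 * cube_index. A direct check of the identity:
for k in range(1, 6):
    assert (k + 1) ** 3 - k ** 3 == 3 * k * k + 3 * k + 1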
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_UpperCAmelCase = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_UpperCAmelCase = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_UpperCAmelCase = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_UpperCAmelCase = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_UpperCAmelCase = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_UpperCAmelCase = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_UpperCAmelCase = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_UpperCAmelCase = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Any = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : str = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_UpperCAmelCase = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_UpperCAmelCase = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__lowercase )
class _UpperCAmelCase :
'''simple docstring'''
def __call__( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Union[bool, str] = False , UpperCamelCase__ : Union[bool, str] = False , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[bool] = None , **UpperCamelCase__ : Optional[int] , ):
if titles is None and texts is None:
return super().__call__(
UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
elif titles is None or texts is None:
A = titles if texts is None else texts
return super().__call__(
UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
A = titles if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [titles]
A = texts if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [texts]
A = len(UpperCamelCase__ )
A = questions if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [questions] * n_passages
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
f'''There should be as many titles than texts but got {len(UpperCamelCase__ )} titles and {len(UpperCamelCase__ )} texts.''' )
A = super().__call__(UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )['input_ids']
A = super().__call__(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )['input_ids']
A = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCamelCase__ , UpperCamelCase__ )
]
}
if return_attention_mask is not False:
A = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
A = attention_mask
return self.pad(UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : BatchEncoding , UpperCamelCase__ : DPRReaderOutput , UpperCamelCase__ : int = 16 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 4 , ):
A = reader_input['input_ids']
A , A , A = reader_output[:3]
A = len(UpperCamelCase__ )
A = sorted(range(UpperCamelCase__ ) , reverse=UpperCamelCase__ , key=relevance_logits.__getitem__ )
A = []
for doc_id in sorted_docs:
A = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
A = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A = sequence_ids.index(self.pad_token_id )
else:
A = len(UpperCamelCase__ )
A = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase__ , top_spans=UpperCamelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase__ , start_index=UpperCamelCase__ , end_index=UpperCamelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(UpperCamelCase__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : List[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , ):
A = []
for start_index, start_score in enumerate(UpperCamelCase__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
A = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] , reverse=UpperCamelCase__ )
A = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
A = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCamelCase__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__lowercase )
class _UpperCAmelCase ( __lowercase , __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Dict = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : str = ['''input_ids''', '''attention_mask''']
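# Illustrative usage, assuming the reader tokenizer above matches
# transformers' DPRReaderTokenizer: question, title, and passage are packed
# into one sequence per passage, as encoded in __call__ above.
from transformers import DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base')
encoded = tokenizer(
    questions='What is love?',
    titles='Haddaway',
    texts='"What Is Love" is a song recorded by the artist Haddaway',
    return_tensors='pt',
)
print(encoded['input_ids'].shape)  # (n_passages, sequence_length)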
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
def __init__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Tuple ):
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
A = WATERMARK_BITS
A = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor ):
# can't encode images that are smaller than 256
if images.shape[-1] < 256:
return images
A = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A = [self.encoder.encode(UpperCamelCase__ , 'dwtDct' ) for image in images]
A = torch.from_numpy(np.array(UpperCamelCase__ ) ).permute(0 , 3 , 1 , 2 )
A = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
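# Round-trip sketch: the 48-bit watermark embedded above can be read back
# with imwatermark's decoder; the method string must match the encoder's
# 'dwtDct'. The helper name is hypothetical, and the input is expected to be
# a uint8 BGR image array as imwatermark uses.
from imwatermark import WatermarkDecoder

def read_watermark(bgr_uint8_image):
    decoder = WatermarkDecoder('bits', len(WATERMARK_BITS))  # 48 bits
    return decoder.decode(bgr_uint8_image, 'dwtDct')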
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "layer_norm" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = only_cross_attention
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A = AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A = AdaLayerNormZero(UpperCamelCase__ , UpperCamelCase__ )
else:
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = Attention(
query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A = (
AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
)
A = Attention(
query_dim=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , upcast_attention=UpperCamelCase__ , ) # is self-attn if encoder_hidden_states is none
else:
A = None
A = None
# 3. Feed-forward
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = FeedForward(UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn=UpperCamelCase__ , final_dropout=UpperCamelCase__ )
# let chunk size default to None
A = None
A = 0
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
# Sets chunk feed-forward
A = chunk_size
A = dim
def UpperCamelCase ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Dict[str, Any] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A = self.norma(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A = self.norma(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=hidden_states.dtype )
else:
A = self.norma(UpperCamelCase__ )
A = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
if self.use_ada_layer_norm_zero:
A = gate_msa.unsqueeze(1 ) * attn_output
A = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A = (
self.norma(UpperCamelCase__ , UpperCamelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase__ )
)
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
A = attn_output + hidden_states
# 3. Feed-forward
A = self.norma(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A = torch.cat(
[self.ff(UpperCamelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A = self.ff(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = gate_mlp.unsqueeze(1 ) * ff_output
A = ff_output + hidden_states
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = int(dim * mult )
A = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A = GELU(UpperCamelCase__ , UpperCamelCase__ )
if activation_fn == "gelu-approximate":
A = GELU(UpperCamelCase__ , UpperCamelCase__ , approximate='tanh' )
elif activation_fn == "geglu":
A = GEGLU(UpperCamelCase__ , UpperCamelCase__ )
elif activation_fn == "geglu-approximate":
A = ApproximateGELU(UpperCamelCase__ , UpperCamelCase__ )
A = nn.ModuleList([] )
# project in
self.net.append(UpperCamelCase__ )
# project dropout
self.net.append(nn.Dropout(UpperCamelCase__ ) )
# project out
self.net.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCamelCase__ ) )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int ):
for module in self.net:
A = module(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str = "none" ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
A = approximate
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int ):
A = self.proj(UpperCamelCase__ )
A = self.gelu(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , dim_out * 2 )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Tuple ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def UpperCamelCase ( self : str , UpperCamelCase__ : str ):
A , A = self.proj(UpperCamelCase__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCamelCase__ )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
A = self.proj(UpperCamelCase__ )
return x * torch.sigmoid(1.702 * x )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
super().__init__()
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , embedding_dim * 2 )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ ) ) )
A , A = torch.chunk(UpperCamelCase__ , 2 )
A = self.norm(UpperCamelCase__ ) * (1 + scale) + shift
return x
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
super().__init__()
A = CombinedTimestepLabelEmbeddings(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , 6 * embedding_dim , bias=UpperCamelCase__ )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ , eps=1e-6 )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=UpperCamelCase__ ) ) )
A , A , A , A , A , A = emb.chunk(6 , dim=1 )
A = self.norm(UpperCamelCase__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : float = 1e-5 ):
super().__init__()
A = num_groups
A = eps
if act_fn is None:
A = None
else:
A = get_activation(UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , out_dim * 2 )
def UpperCamelCase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str ):
if self.act:
A = self.act(UpperCamelCase__ )
A = self.linear(UpperCamelCase__ )
A = emb[:, :, None, None]
A , A = emb.chunk(2 , dim=1 )
A = F.group_norm(UpperCamelCase__ , self.num_groups , eps=self.eps )
A = x * (1 + scale) + shift
return x
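# Smoke-test sketch, assuming the first block above mirrors diffusers'
# BasicTransformerBlock; with the default 'layer_norm' norm type no timestep
# or class embedding is needed for a forward pass:
import torch
from diffusers.models.attention import BasicTransformerBlock

block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
hidden_states = torch.randn(2, 77, 64)  # (batch, sequence, channels)
out = block(hidden_states)               # same shape as the input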
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[str] ):
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.' , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
_UpperCAmelCase = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
_UpperCAmelCase = "</w>"
_UpperCAmelCase = "@@ "
def __UpperCamelCase (lowerCAmelCase : Optional[int] ) -> List[str]:
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
return pairs
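# For example:
#     get_pairs(("h", "e", "l", "l", "o"))
#     # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}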
# Speech2Text2 has no max input length
_UpperCAmelCase = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
    def __init__(self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>",
                 do_lower_case=False, merges_file=None, **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token,
                         do_lower_case=do_lower_case, **kwargs)
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
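# Hypothetical round trip with local files (paths are illustrative):
#     tok = Speech2Text2Tokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#     tokens = tok._tokenize("hello world")
#     text = tok.convert_tokens_to_string(tokens)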
| 699 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for NAT (Neighborhood Attention Transformer) models."""

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16],
                 kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02,
                 layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
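# Quick sanity check of the derived channel width (a sketch):
#     config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5])
#     assert config.hidden_size == 64 * 2 ** 3  # channels after the last stage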
| 699 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI model."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
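# Hypothetical invocation once the checkpoint is downloaded (the labels are
# illustrative, not part of the tool's definition):
#     tool = TextClassificationTool()
#     tool("This movie was great", labels=["positive", "negative"])  # -> "positive"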
| 699 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
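# This builder is normally reached through `datasets.load_dataset`; a sketch
# (the file path is an illustrative assumption):
#     from datasets import load_dataset
#     ds = load_dataset("json", data_files="data.jsonl")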
| 699 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
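# For example:
#     rename_key("down_blocks.0.attentions.1.weight")
#     # -> "down_blocks_0.attentions_1.weight"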
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
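# Hypothetical conversion call (the model objects are assumptions for illustration):
#     flax_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)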
| 699 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 699 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """This class represents a vector of arbitrary size."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
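# For example: str(axpy(2, Vector([1, 2]), Vector([3, 4]))) == "(5,8)"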
def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """This class represents an arbitrary matrix."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
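if __name__ == "__main__":
    # Illustrative smoke test for the classes above.
    v = Vector([1, 2, 2])
    print(v.euclidean_length())  # 3.0
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # -2
    print(m * Vector([1, 1]))  # (3,7)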
| 699 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
SCREAMING_SNAKE_CASE : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A = CLIPTextModel(UpperCamelCase__ )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any]=0 ):
A = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('RGB' )
if str(UpperCamelCase__ ).startswith('mps' ):
A = torch.manual_seed(UpperCamelCase__ )
else:
A = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self : List[Any] ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = self.get_dummy_inputs(UpperCamelCase__ )
A = sd_pipe(**UpperCamelCase__ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase ( self : List[str] ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = self.get_dummy_inputs(UpperCamelCase__ )
A = 'french fries'
A = sd_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ )
A = output.images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = self.get_dummy_inputs(UpperCamelCase__ )
A = [inputs['prompt']] * 2
A = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
A = torch.from_numpy(UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
A = image / 2 + 0.5
A = image.permute(0 , 3 , 1 , 2 )
A = image.repeat(2 , 1 , 1 , 1 )
A = sd_pipe(**UpperCamelCase__ ).images
A = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
A = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' )
A = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A = sd_pipe.to(UpperCamelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = self.get_dummy_inputs(UpperCamelCase__ )
A = sd_pipe(**UpperCamelCase__ ).images
A = image[0, -3:, -3:, -1]
A = [round(UpperCamelCase__ , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(UpperCamelCase__ ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
A = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCamelCase ( self : List[Any] ):
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**UpperCamelCase__ )
A = VaeImageProcessor(do_resize=UpperCamelCase__ , do_normalize=UpperCamelCase__ )
A = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = pipe(**self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type='pt' ) )[0]
A = components['vae']
A = self.get_dummy_inputs_by_type(UpperCamelCase__ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
A = vae.encode(inputs[image_param] ).latent_dist.mode()
A = pipe(**UpperCamelCase__ )[0]
A = np.abs(out - out_latents_inputs ).max()
self.assertLess(UpperCamelCase__ , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=0 ):
A = torch.manual_seed(UpperCamelCase__ )
A = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
A = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self : Optional[int] ):
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**UpperCamelCase__ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCamelCase ( self : Tuple ):
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=UpperCamelCase__ )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**UpperCamelCase__ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCamelCase ( self : Dict ):
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=UpperCamelCase__ )
A = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**UpperCamelCase__ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCamelCase ( self : Tuple ):
A = 0
def callback_fn(UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor ) -> None:
A = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A = latents[0, -3:, -3:, -1]
A = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A = latents[0, -3:, -3:, -1]
A = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
A = False
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
A = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A = self.get_inputs()
pipe(**UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCamelCase ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
A = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A = self.get_inputs()
A = pipe(**UpperCamelCase__ )
A = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def UpperCamelCase ( self : Any ):
A = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
A = inputs['image'].resize((504, 504) )
A = 'timbrooks/instruct-pix2pix'
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
A = pipe(**UpperCamelCase__ )
A = output.images[0]
A = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
A = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 699 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    """Configuration class for BlenderbotSmall models."""

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
                 encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
                 encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
                 activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
                 init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1,
                 eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
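# Quick sanity check (illustrative):
#     config = BlenderbotSmallConfig()
#     (config.hidden_size, config.num_attention_heads)  # (512, 16) via attribute_map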
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def UpperCamelCase ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
| 699 | 1 |
def perfect_cube(n: int) -> bool:
    # Round the cube root: 27 ** (1 / 3) == 3.0000000000000004, so comparing
    # un-rounded cubes would wrongly report False for true cubes.
    val = round(n ** (1 / 3))
    return val**3 == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
| 699 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None,
                 max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None,
                 return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False,
                 return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None,
                 **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
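# Hypothetical usage with a public checkpoint (inputs are illustrative):
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     batch = processor(images=image, text="a photo of a cat", return_tensors="pt")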
| 699 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
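# Round-trip sketch (illustrative; assumes a README.md whose YAML block parses cleanly):
#
# metadata = DatasetMetadata.from_yaml_string("pretty_name: Demo\ntags: []\n")
# print(metadata.to_yaml_string())       # dashed YAML keys map back per _FIELDS_WITH_DASHES
# metadata.to_readme(Path("README.md"))  # rewrites the leading "---" YAML block in place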
| 699 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
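# Example output (first lines of multiplication_table(number=5, number_of_terms=10)):
#   5 * 1 = 5
#   5 * 2 = 10
#   5 * 3 = 15
assert multiplication_table(3, 2) == "3 * 1 = 3\n3 * 2 = 6"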
| 699 | 1 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
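# Wiring sketch (hedged): this command is normally registered by the transformers CLI, roughly:
#
# from argparse import ArgumentParser
# parser = ArgumentParser("Transformers CLI tool")
# commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
# DownloadCommand.register_subcommand(commands_parser)
# args = parser.parse_args(["download", "--cache-dir", "/tmp/models", "bert-base-uncased"])
# args.func(args).run()  # downloads the model + tokenizer into the cache dir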
| 699 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        spec_out = self.decoder_norm(y)
        spec_out = self.post_dropout(spec_out)
        spec_out = self.spec_out(spec_out)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        # The gated feed-forward needs two distinct input projections (wi_0 and wi_1);
        # the obfuscated source had collapsed them into a single layer.
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
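# Minimal FiLM smoke test (added for illustration; shapes only, no pretrained weights assumed):
if __name__ == "__main__":
    film = T5FiLMLayer(in_features=16, out_features=4)
    x = torch.randn(2, 3, 4)              # (batch, seq, features)
    conditioning = torch.randn(2, 1, 16)  # (batch, 1, 4 * d_model)-style conditioning
    out = film(x, conditioning)
    assert out.shape == x.shape           # FiLM rescales/shifts but never changes the shape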
| 699 | 1 |
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6a_09e_667,
0Xbb_67a_e85,
0X3c_6ef_372,
0Xa5_4ff_53a,
0X51_0e5_27f,
0X9b_056_88c,
0X1f_83d_9ab,
0X5b_e0c_d19,
]
# Initialize round constants
        self.round_constants = [
0X42_8a2_f98,
0X71_374_491,
0Xb5_c0f_bcf,
0Xe9_b5d_ba5,
0X39_56c_25b,
0X59_f11_1f1,
0X92_3f8_2a4,
0Xab_1c5_ed5,
0Xd8_07a_a98,
0X12_835_b01,
0X24_318_5be,
0X55_0c7_dc3,
0X72_be5_d74,
0X80_deb_1fe,
0X9b_dc0_6a7,
0Xc1_9bf_174,
0Xe4_9b6_9c1,
0Xef_be4_786,
0X0f_c19_dc6,
0X24_0ca_1cc,
0X2d_e92_c6f,
0X4a_748_4aa,
0X5c_b0a_9dc,
0X76_f98_8da,
0X98_3e5_152,
0Xa8_31c_66d,
0Xb0_032_7c8,
0Xbf_597_fc7,
0Xc6_e00_bf3,
0Xd5_a79_147,
0X06_ca6_351,
0X14_292_967,
0X27_b70_a85,
0X2e_1b2_138,
0X4d_2c6_dfc,
0X53_380_d13,
0X65_0a7_354,
0X76_6a0_abb,
0X81_c2c_92e,
0X92_722_c85,
0Xa2_bfe_8a1,
0Xa8_1a6_64b,
0Xc2_4b8_b70,
0Xc7_6c5_1a3,
0Xd1_92e_819,
0Xd6_990_624,
0Xf4_0e3_585,
0X10_6aa_070,
0X19_a4c_116,
0X1e_376_c08,
0X27_487_74c,
0X34_b0b_cb5,
0X39_1c0_cb3,
0X4e_d8a_a4a,
0X5b_9cc_a4f,
0X68_2e6_ff3,
0X74_8f8_2ee,
0X78_a56_36f,
0X84_c87_814,
0X8c_c70_208,
0X90_bef_ffa,
0Xa4_506_ceb,
0Xbe_f9a_3f7,
0Xc6_717_8f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")

    args = parser.parse_args()

    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
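# Cross-check against hashlib (illustrative):
#
# >>> import hashlib
# >>> SHA256(b"abc").hash == hashlib.sha256(b"abc").hexdigest()
# True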
| 699 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
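# Usage sketch (assumes the `invisible-watermark` package that provides `imwatermark` is installed):
#
# watermarker = StableDiffusionXLWatermarker()
# images = torch.randn(1, 3, 512, 512).clamp(-1.0, 1.0)  # stand-in for a decoded batch in [-1, 1]
# watermarked = watermarker.apply_watermark(images)       # same shape, values re-clamped to [-1, 1]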
| 699 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1_000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
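# Example invocation (hedged; downloads the timm weights, so it needs network access):
#
# python convert_vit_hybrid_timm_to_pytorch.py \
#     --vit_name vit_base_r50_s16_384 --pytorch_dump_folder_path ./vit-hybrid
#
# or equivalently from Python:
# convert_vit_checkpoint("vit_base_r50_s16_384", "./vit-hybrid", push_to_hub=False)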
| 699 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
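# Example invocation (hedged; the script filename is assumed and a local fairseq checkpoint is required):
#
# python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path /path/to/unispeech_sat.pt \
#     --pytorch_dump_folder_path ./unispeech-sat \
#     --not_finetuned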
| 699 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """
    A training module for NER. See BaseTransformer for the core options.
    """

    mode = "token-classification"
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb):
        "Compute validation"
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)

        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
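# Example invocation (hedged; expects CoNLL-style train/dev/test files under --data_dir and
# the generic flags provided by lightning_base.add_generic_args):
#
# python run_ner.py --data_dir ./conll2003 --model_name_or_path bert-base-cased \
#     --output_dir ./ner-model --do_train --do_predict --gpus 1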
| 699 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # value stored in the node
        self.next: Node[T] | None = None  # reference to the next node

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
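# Usage example (illustrative):
#
# >>> stack = Stack[int]()
# >>> stack.push(1); stack.push(2); stack.push(3)
# >>> str(stack)
# '3->2->1'
# >>> stack.pop()
# 3
# >>> stack.peek()
# 2
# >>> len(stack)
# 2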
| 699 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as VideoMAE does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
def UpperCamelCase ( self : Optional[Any] ):
def check_hidden_states_output(UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple ):
A = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A = outputs.hidden_states
A = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
A = self.model_tester.seq_length - self.model_tester.num_masks
A = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase ( self : int ):
pass
def __UpperCamelCase () -> List[str]:
A = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset' )
A = np.load(lowerCAmelCase )
return list(lowerCAmelCase )
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase ( self : Tuple ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self : Any ):
A = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
UpperCamelCase__ )
A = self.default_image_processor
A = prepare_video()
A = image_processor(UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A = model(**UpperCamelCase__ )
# verify the logits
A = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def UpperCamelCase ( self : int ):
A = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(UpperCamelCase__ )
A = self.default_image_processor
A = prepare_video()
A = image_processor(UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# add boolean mask, indicating which patches to mask
A = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
A = torch.load(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A = model(**UpperCamelCase__ )
# verify the logits
A = torch.Size([1, 1408, 1536] )
A = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=UpperCamelCase__ )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
A = torch.tensor([0.5_142] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase__ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
A = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=UpperCamelCase__ ).to(
UpperCamelCase__ )
with torch.no_grad():
A = model(**UpperCamelCase__ )
        A = torch.tensor([0.6_469] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase__ , atol=1e-4 ) )
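# Illustration (added): the boolean-mask construction used for VideoMAEForPreTraining
# above builds one shared mask of `num_masks` ones followed by zeros up to
# `seq_length`, then repeats it for every batch row so each video masks the same
# number of patches. Minimal standalone sketch of the same pattern:
#
#     import torch
#     num_masks, seq_length, batch_size = 2, 5, 3
#     mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
#     bool_masked_pos = mask.expand(batch_size, -1).bool()
#     assert bool_masked_pos.shape == (batch_size, seq_length)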
| 699 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value reachable from this node of a perfect game tree."""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f'Optimal value : {minimax(0, 0, True, scores, height)}')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
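# Worked example (added): for the eight leaf scores in main() the tree height is
# log2(8) = 3. The depth-2 maximizer nodes yield 90, 33, 65 and 34423, the
# depth-1 minimizer nodes yield min(90, 33) = 33 and min(65, 34423) = 65, and
# the root maximizer picks max(33, 65) = 65.
assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34_423], 3) == 65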
| 699 | 1 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = FunnelTokenizer
SCREAMING_SNAKE_CASE : int = FunnelTokenizerFast
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : str = True
def UpperCamelCase ( self : List[Any] ):
super().setUp()
A = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self : str , **UpperCamelCase__ : List[Any] ):
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCamelCase ( self : List[str] , **UpperCamelCase__ : str ):
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : int ):
A = 'UNwant\u00E9d,running'
A = 'unwanted, running'
return input_text, output_text
def UpperCamelCase ( self : Any ):
A = self.tokenizer_class(self.vocab_file )
A = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(UpperCamelCase__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
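    # Funnel assigns token type id 2 to the [CLS] token (unlike BERT's 0), which is
    # why the token_type_ids assertions in the next test expect a leading [2].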
def UpperCamelCase ( self : Union[str, Any] ):
A = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
A = tokenizer('UNwant\u00E9d,running' )
A = len(inputs['input_ids'] ) - 1
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len )
A = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
| 699 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('csv', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length'
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding='max_length',
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: str = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ' --overwrite_output_dir to overcome.'
        )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )
    logger.info(
        f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '
        f'16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task='text-classification',
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool('.bin' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {'acc': (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')

        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key, value in result.items():
                logger.info(f'  {key} = {value}')
                writer.write(f'{key} = {value}\n')

            results.update(result)

    return results
if __name__ == "__main__":
main()
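# Example invocation (added, illustrative; the script name and CSV paths are
# placeholders, the flags come from the dataclasses and TFTrainingArguments above):
#     python run_tf_text_classification.py \
#         --model_name_or_path bert-base-uncased \
#         --train_file train.csv --dev_file dev.csv \
#         --label_column_id 0 \
#         --output_dir ./out --do_train --do_eval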
| 699 | 1 |
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f'{solution() = }')
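# Sanity check (added): every possible roll is counted exactly once, so the
# frequency table for nine 4-sided dice must sum to 4**9 outcomes.
assert sum(total_frequency_distribution(sides_number=4, dice_number=9)) == 4**9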
| 699 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 699 | 1 |
def join(separator: str, separated: list[str]) -> str:
    joined = ''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception('join() accepts only strings to be joined')
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
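# Usage examples (added): the separator goes between items and is stripped from
# both ends of the result.
assert join('', ['a', 'b', 'c', 'd']) == 'abcd'
assert join('#', ['a', 'b', 'c', 'd']) == 'a#b#c#d'
assert join(' ', ['You', 'are', 'amazing!']) == 'You are amazing!'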
| 699 |
def actual_power(a: int, b: int):
    """Divide-and-conquer computation of a**b for integer a and b."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
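# Note (added): power(-2, -3) computes 1 / (-2)**3 = -0.125. Because actual_power()
# recurses twice on the same half it performs O(b) multiplications; below is a
# sketch of the usual O(log b) variant that reuses the half (hypothetical helper,
# not part of the original file):
def fast_power(a: int, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    # square the half, and multiply by `a` once more when the exponent is odd
    return half * half * (a if b % 2 else 1)


assert fast_power(-2, -3) == -0.125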
| 699 | 1 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Any=4 , UpperCamelCase__ : Optional[int]=[10, 20, 30, 40] , UpperCamelCase__ : Dict=[2, 2, 3, 2] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[int]=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : str=10 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : str=["stage2", "stage3", "stage4"] , UpperCamelCase__ : int=[2, 3, 4] , UpperCamelCase__ : str=None , ):
A = parent
A = batch_size
A = image_size
A = num_channels
A = num_stages
A = hidden_sizes
A = depths
A = is_training
A = use_labels
A = intermediate_size
A = hidden_act
A = num_labels
A = initializer_range
A = out_features
A = out_indices
A = scope
def UpperCamelCase ( self : Any ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.num_labels )
A = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self : Optional[int] ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
A = ConvNextModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A = model(UpperCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ):
A = ConvNextForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ):
A = ConvNextBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A = model(UpperCamelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A = None
A = ConvNextBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCamelCase ( self : Optional[Any] ):
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Any = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Tuple = False
def UpperCamelCase ( self : int ):
A = ConvNextModelTester(self )
A = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def UpperCamelCase ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self : int ):
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def UpperCamelCase ( self : Tuple ):
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def UpperCamelCase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def UpperCamelCase ( self : Union[str, Any] ):
pass
def UpperCamelCase ( self : Dict ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(UpperCamelCase__ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def UpperCamelCase ( self : Optional[int] ):
def check_hidden_states_output(UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
A = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : Optional[int] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def UpperCamelCase ( self : List[str] ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = ConvNextModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __UpperCamelCase () -> Tuple:
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def UpperCamelCase ( self : Any ):
A = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase__ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A = model(**UpperCamelCase__ )
# verify the logits
A = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase , __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = (ConvNextBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = ConvNextConfig
SCREAMING_SNAKE_CASE : int = False
def UpperCamelCase ( self : str ):
A = ConvNextModelTester(self )
| 699 |
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers')

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
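# Usage examples (added): the classic cases.
assert max_product_subarray([2, 3, -2, 4]) == 6  # subarray [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0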
| 699 | 1 |
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 699 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 699 | 1 |
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
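# Expected output (added note): for the sample graph above the articulation points
# printed are 2, 3 and 5: removing 2 separates {0, 1} from the rest, removing 3
# isolates 4, and removing 5 cuts the 6-7-8 cycle off from the rest of the graph.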
| 699 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = 'biogpt'

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 699 | 1 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value reachable from this node of a perfect game tree."""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f'Optimal value : {minimax(0, 0, True, scores, height)}')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 699 |
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print('No. of Operation required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
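# Expected output (added note): for the chain [30, 35, 15, 5, 10, 20, 25] (the
# classic CLRS example) the minimum cost is 15125 scalar multiplications, with
# parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).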
| 699 | 1 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def __UpperCamelCase () -> Union[str, Any]:
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def __UpperCamelCase () -> int:
print('Bonjour!' )
yield
print('Au revoir!' )
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self : Tuple ):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Optional[Any] ):
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCamelCase ( self : int , UpperCamelCase__ : int ):
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
def UpperCamelCase ( self : str , UpperCamelCase__ : Union[str, Any] ):
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
def UpperCamelCase ( self : Any ):
self.assertEqual(find_labels(UpperCamelCase__ ) , ['labels'] )
self.assertEqual(find_labels(UpperCamelCase__ ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(UpperCamelCase__ ) , ['start_positions', 'end_positions'] )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
pass
self.assertEqual(find_labels(UpperCamelCase__ ) , ['labels'] )
@require_tf
def UpperCamelCase ( self : Optional[Any] ):
self.assertEqual(find_labels(UpperCamelCase__ ) , ['labels'] )
self.assertEqual(find_labels(UpperCamelCase__ ) , ['labels', 'next_sentence_label'] )
self.assertEqual(find_labels(UpperCamelCase__ ) , ['start_positions', 'end_positions'] )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
pass
self.assertEqual(find_labels(UpperCamelCase__ ) , ['labels'] )
@require_flax
def UpperCamelCase ( self : Any ):
# Flax models don't have labels
self.assertEqual(find_labels(UpperCamelCase__ ) , [] )
self.assertEqual(find_labels(UpperCamelCase__ ) , [] )
self.assertEqual(find_labels(UpperCamelCase__ ) , [] )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
pass
self.assertEqual(find_labels(UpperCamelCase__ ) , [] )
| 699 |
from math import isqrt


def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f'{solution() = }')
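# Note (added): the candidates 7, 19, 37, 61, ... are differences of consecutive
# cubes, (k + 1)**3 - k**3 == 3*k*k + 3*k + 1, which is exactly what the
# `prime_candidate += 6 * cube_index` update enumerates.
assert all((k + 1) ** 3 - k**3 == 3 * k * k + 3 * k + 1 for k in range(1, 50))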
| 699 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 699 |
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 699 | 1 |
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    print('Making key files...')
    make_key_files('rsa', 1_024)
    print('Key files generation successful.')


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print('Generating prime p...')
    p = rabinMiller.generate_large_prime(key_size)
    print('Generating prime q...')
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print('Generating e that is relatively prime to (p - 1) * (q - 1)...')
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print('Calculating d that is mod inverse of e...')
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.'
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', 'w') as out_file:
        out_file.write(f'{key_size},{public_key[0]},{public_key[1]}')

    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', 'w') as out_file:
        out_file.write(f'{key_size},{private_key[0]},{private_key[1]}')


if __name__ == "__main__":
    main()
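# Round-trip sketch (added, illustrative only; assumes keys produced by
# generate_key above, textbook RSA without padding):
#     public_key, private_key = generate_key(1_024)
#     n, e = public_key
#     _, d = private_key
#     message = 65_537
#     assert pow(pow(message, e, n), d, n) == message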
| 699 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "layer_norm" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = only_cross_attention
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A = AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A = AdaLayerNormZero(UpperCamelCase__ , UpperCamelCase__ )
else:
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = Attention(
query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A = (
AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
)
A = Attention(
query_dim=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , upcast_attention=UpperCamelCase__ , ) # is self-attn if encoder_hidden_states is none
else:
A = None
A = None
# 3. Feed-forward
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = FeedForward(UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn=UpperCamelCase__ , final_dropout=UpperCamelCase__ )
# let chunk size default to None
A = None
A = 0
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
# Sets chunk feed-forward
A = chunk_size
A = dim
def UpperCamelCase ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Dict[str, Any] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A = self.norma(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A = self.norma(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=hidden_states.dtype )
else:
A = self.norma(UpperCamelCase__ )
A = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
if self.use_ada_layer_norm_zero:
A = gate_msa.unsqueeze(1 ) * attn_output
A = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A = (
self.norma(UpperCamelCase__ , UpperCamelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase__ )
)
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
A = attn_output + hidden_states
# 3. Feed-forward
A = self.norma(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A = torch.cat(
[self.ff(UpperCamelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A = self.ff(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = gate_mlp.unsqueeze(1 ) * ff_output
A = ff_output + hidden_states
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = int(dim * mult )
A = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A = GELU(UpperCamelCase__ , UpperCamelCase__ )
if activation_fn == "gelu-approximate":
A = GELU(UpperCamelCase__ , UpperCamelCase__ , approximate='tanh' )
elif activation_fn == "geglu":
A = GEGLU(UpperCamelCase__ , UpperCamelCase__ )
elif activation_fn == "geglu-approximate":
A = ApproximateGELU(UpperCamelCase__ , UpperCamelCase__ )
A = nn.ModuleList([] )
# project in
self.net.append(UpperCamelCase__ )
# project dropout
self.net.append(nn.Dropout(UpperCamelCase__ ) )
# project out
self.net.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCamelCase__ ) )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int ):
for module in self.net:
A = module(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str = "none" ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
A = approximate
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int ):
A = self.proj(UpperCamelCase__ )
A = self.gelu(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , dim_out * 2 )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Tuple ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def UpperCamelCase ( self : str , UpperCamelCase__ : str ):
A , A = self.proj(UpperCamelCase__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCamelCase__ )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
A = self.proj(UpperCamelCase__ )
return x * torch.sigmoid(1.702 * x )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
super().__init__()
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , embedding_dim * 2 )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ ) ) )
A , A = torch.chunk(UpperCamelCase__ , 2 )
A = self.norm(UpperCamelCase__ ) * (1 + scale) + shift
return x
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
super().__init__()
A = CombinedTimestepLabelEmbeddings(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , 6 * embedding_dim , bias=UpperCamelCase__ )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ , eps=1e-6 )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=UpperCamelCase__ ) ) )
A , A , A , A , A , A = emb.chunk(6 , dim=1 )
A = self.norm(UpperCamelCase__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : float = 1e-5 ):
super().__init__()
A = num_groups
A = eps
if act_fn is None:
A = None
else:
A = get_activation(UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , out_dim * 2 )
def UpperCamelCase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str ):
if self.act:
A = self.act(UpperCamelCase__ )
A = self.linear(UpperCamelCase__ )
A = emb[:, :, None, None]
A , A = emb.chunk(2 , dim=1 )
A = F.group_norm(UpperCamelCase__ , self.num_groups , eps=self.eps )
A = x * (1 + scale) + shift
return x
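# Illustration (added): the `_chunk_size` path above splits the normalized hidden
# states along one dimension and applies the feed-forward per slice to cap peak
# memory; since the projection acts on the last dim only, the result matches the
# unchunked call. Minimal standalone sketch (not part of the original module):
#
#     import torch
#     ff = torch.nn.Linear(8, 8)
#     hidden = torch.randn(2, 6, 8)  # chunk along the sequence dimension
#     chunks = hidden.chunk(6 // 2, dim=1)
#     out = torch.cat([ff(s) for s in chunks], dim=1)
#     assert torch.allclose(out, ff(hidden), atol=1e-6)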
| 699 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class _UpperCAmelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionAttendAndExcitePipeline
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
SCREAMING_SNAKE_CASE : str = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def UpperCamelCase ( cls : Optional[Any] ):
super().setUpClass()
torch.use_deterministic_algorithms(UpperCamelCase__ )
@classmethod
def UpperCamelCase ( cls : List[Any] ):
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCamelCase__ )
def UpperCamelCase ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
A = CLIPTextModel(UpperCamelCase__ )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int=0 ):
if str(UpperCamelCase__ ).startswith('mps' ):
A = torch.manual_seed(UpperCamelCase__ )
else:
A = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        A = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def UpperCamelCase ( self : str ):
A = 'cpu'
A = self.get_dummy_components()
A = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = self.get_dummy_inputs(UpperCamelCase__ )
A = pipe(**UpperCamelCase__ ).images
A = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
A = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
A = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def UpperCamelCase ( self : Any ):
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def UpperCamelCase ( self : Optional[int] ):
        # NOTE: larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase ( self : Union[str, Any] ):
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def UpperCamelCase ( self : int ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def UpperCamelCase ( self : int ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def UpperCamelCase ( self : int ):
super().test_save_load_local(expected_max_difference=5e-4 )
def UpperCamelCase ( self : str ):
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase ( cls : List[Any] ):
super().setUpClass()
torch.use_deterministic_algorithms(UpperCamelCase__ )
@classmethod
def UpperCamelCase ( cls : List[str] ):
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : List[Any] ):
A = torch.manual_seed(51 )
A = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
pipe.to('cuda' )
A = 'a painting of an elephant with glasses'
A = [5, 7]
A = pipe(
prompt=UpperCamelCase__ , token_indices=UpperCamelCase__ , guidance_scale=7.5 , generator=UpperCamelCase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5e-1
| 699 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
_UpperCAmelCase = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
_UpperCAmelCase = "</w>"
_UpperCAmelCase = "@@ "
def __UpperCamelCase (lowerCAmelCase : Optional[int] ) -> List[str]:
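    # collect all adjacent symbol pairs (bigrams) appearing in the word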
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
return pairs
# Speech2Text2 has no max input length
_UpperCAmelCase = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : str="<pad>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : List[str]=None , **UpperCamelCase__ : Optional[int] , ):
super().__init__(
unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , **UpperCamelCase__ , )
A = do_lower_case
with open(UpperCamelCase__ , encoding='utf-8' ) as vocab_handle:
A = json.load(UpperCamelCase__ )
A = {v: k for k, v in self.encoder.items()}
if merges_file is None:
            logger.info(f'''No merges file provided. {self.__class__.__name__} can only be used for decoding.''' )
A = None
A = None
else:
with open(UpperCamelCase__ , encoding='utf-8' ) as merges_handle:
A = merges_handle.read().split('\n' )[:-1]
A = [tuple(merge.split()[:2] ) for merge in merges]
A = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A = {}
@property
def UpperCamelCase ( self : Union[str, Any] ):
return len(self.decoder )
def UpperCamelCase ( self : Optional[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] ):
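        # append the word-end marker to the last character before applying BPE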
A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
A = get_pairs(UpperCamelCase__ )
if not pairs:
return token
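        # repeatedly merge the best-ranked bigram until no known merge remains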
while True:
A = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(UpperCamelCase__ ):
try:
A = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(UpperCamelCase__ )
A = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
A = get_pairs(UpperCamelCase__ )
A = ' '.join(UpperCamelCase__ )
if word == "\n " + BPE_TOKEN_MERGES:
A = '\n' + BPE_TOKEN_MERGES
if word.endswith(UpperCamelCase__ ):
A = word.replace(UpperCamelCase__ , '' )
A = word.replace(' ' , UpperCamelCase__ )
A = word
return word
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : Dict ):
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
                ' Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
A = text.lower()
A = text.split()
A = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(UpperCamelCase__ ).split(' ' ) ) )
return split_tokens
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : str ):
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase ( self : str , UpperCamelCase__ : int ):
A = self.decoder.get(UpperCamelCase__ , self.unk_token )
return result
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : List[str] ):
A = ' '.join(UpperCamelCase__ )
# make sure @@ tokens are concatenated
A = ''.join(string.split(UpperCamelCase__ ) )
return string
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '\n' )
A = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
A = token_index
writer.write(' '.join(UpperCamelCase__ ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 699 | 1 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Optional[int]=0 ):
A = floats_tensor((1, 3, 128, 128) , rng=random.Random(UpperCamelCase__ ) )
A = torch.manual_seed(UpperCamelCase__ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self : Dict ):
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = self.get_dummy_inputs()
A = pipe(**UpperCamelCase__ ).images
A = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
A = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def UpperCamelCase ( self : List[Any] ):
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = self.get_dummy_inputs()
A = pipe(**UpperCamelCase__ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCamelCase ( self : Any ):
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = self.get_dummy_inputs()
A = pipe(**UpperCamelCase__ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCamelCase ( self : List[Any] ):
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = self.get_dummy_inputs()
A = pipe(**UpperCamelCase__ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def UpperCamelCase ( self : Union[str, Any] ):
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = self.get_dummy_inputs()
A = pipe(**UpperCamelCase__ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self : str ):
A = ort.SessionOptions()
A = False
return options
def UpperCamelCase ( self : Optional[int] ):
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((128, 128) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = 'A fantasy landscape, trending on artstation'
A = torch.manual_seed(0 )
A = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type='np' , )
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
A = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def UpperCamelCase ( self : List[str] ):
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((128, 128) )
A = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = 'A fantasy landscape, trending on artstation'
A = torch.manual_seed(0 )
A = pipe(
prompt=UpperCamelCase__ , image=UpperCamelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCamelCase__ , output_type='np' , )
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
A = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 699 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''facebook/bart-large-mnli'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
SCREAMING_SNAKE_CASE : Any = '''text_classifier'''
SCREAMING_SNAKE_CASE : Any = AutoTokenizer
SCREAMING_SNAKE_CASE : Dict = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : List[Any] = ['''text''', ['''text''']]
SCREAMING_SNAKE_CASE : Dict = ['''text''']
def UpperCamelCase ( self : List[str] ):
super().setup()
A = self.model.config
A = -1
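        # find the index of the label whose name starts with "entail"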
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
A = int(UpperCamelCase__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
A = labels
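        # pair the input text with one "This example is {label}" hypothesis per candidate label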
return self.pre_processor(
[text] * len(UpperCamelCase__ ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : int , UpperCamelCase__ : List[str] ):
A = outputs.logits
A = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 699 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_UpperCAmelCase = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
_UpperCAmelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __UpperCamelCase (lowerCAmelCase : str ) -> str:
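    # strip a protocol prefix such as "s3://" from the dataset path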
if "://" in dataset_path:
A = dataset_path.split('://' )[1]
return dataset_path
def __UpperCamelCase (lowerCAmelCase : fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __UpperCamelCase (lowerCAmelCase : fsspec.AbstractFileSystem, lowerCAmelCase : str, lowerCAmelCase : str ) -> List[str]:
A = not is_remote_filesystem(lowerCAmelCase )
if is_local:
        # LocalFileSystem.mv does copy + rm; it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowerCAmelCase ), fs._strip_protocol(lowerCAmelCase ) )
else:
fs.mv(lowerCAmelCase, lowerCAmelCase, recursive=lowerCAmelCase )
def __UpperCamelCase () -> None:
if hasattr(fsspec.asyn, 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
A = None
A = None
A = threading.Lock()
| 699 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase (lowerCAmelCase : List[str] ) -> Dict:
A = r'\w+[.]\d+'
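    # rewrite patterns like "layers.0" to "layers_0" to match Flax parameter naming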
A = re.findall(lowerCAmelCase, lowerCAmelCase )
for pat in pats:
A = key.replace(lowerCAmelCase, '_'.join(pat.split('.' ) ) )
return key
def __UpperCamelCase (lowerCAmelCase : Optional[int], lowerCAmelCase : Dict, lowerCAmelCase : Dict ) -> Any:
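    # norm weights ("weight"/"gamma") map to Flax's "scale" parameter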
A = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
A = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCamelCase (lowerCAmelCase : Tuple, lowerCAmelCase : Any, lowerCAmelCase : str=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
A = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A = flax_model.init_weights(PRNGKey(lowerCAmelCase ) )
A = flatten_dict(lowerCAmelCase )
A = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A = rename_key(lowerCAmelCase )
A = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
A , A = rename_key_and_reshape_tensor(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
A = jnp.asarray(lowerCAmelCase )
return unflatten_dict(lowerCAmelCase )
| 699 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '''poolformer'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : int=4.0 , UpperCamelCase__ : Any=[2, 2, 6, 2] , UpperCamelCase__ : Any=[64, 128, 320, 512] , UpperCamelCase__ : List[Any]=[7, 3, 3, 3] , UpperCamelCase__ : Optional[int]=[4, 2, 2, 2] , UpperCamelCase__ : Dict=[2, 1, 1, 1] , UpperCamelCase__ : int=4 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=1e-5 , UpperCamelCase__ : int=0.02 , **UpperCamelCase__ : List[Any] , ):
A = num_channels
A = patch_size
A = stride
A = padding
A = pool_size
A = hidden_sizes
A = mlp_ratio
A = depths
A = patch_sizes
A = strides
A = num_encoder_blocks
A = drop_path_rate
A = hidden_act
A = use_layer_scale
A = layer_scale_init_value
A = initializer_range
super().__init__(**UpperCamelCase__ )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = version.parse('''1.11''' )
@property
def UpperCamelCase ( self : Optional[Any] ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCamelCase ( self : List[str] ):
return 2e-3
| 699 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase__ : Collection[float] | None = None ):
if components is None:
A = []
A = list(UpperCamelCase__ )
def __len__( self : List[Any] ):
return len(self.__components )
def __str__( self : str ):
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
def __add__( self : str , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self : Dict , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Tuple , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Dict , UpperCamelCase__ : Vector ):
...
def __mul__( self : Union[str, Any] , UpperCamelCase__ : float | Vector ):
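        # a scalar operand scales each component; a Vector operand yields the dot product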
if isinstance(UpperCamelCase__ , (float, int) ):
A = [c * other for c in self.__components]
return Vector(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(self ) == len(UpperCamelCase__ ):
A = len(self )
A = [self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return sum(UpperCamelCase__ )
else: # error case
raise Exception('invalid operand!' )
def UpperCamelCase ( self : Union[str, Any] ):
return Vector(self.__components )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : float ):
assert -len(self.__components ) <= pos < len(self.__components )
A = value
def UpperCamelCase ( self : str ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
A = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase__ ) )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Vector , UpperCamelCase__ : bool = False ):
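        # angle from cos(theta) = (self . other) / (|self| * |other|)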
A = self * other
A = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def __UpperCamelCase (lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase )
return Vector([0] * dimension )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (isinstance(lowerCAmelCase, lowerCAmelCase ))
A = [0] * dimension
A = 1
return Vector(lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : float, lowerCAmelCase : Vector, lowerCAmelCase : Vector ) -> Vector:
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and isinstance(lowerCAmelCase, lowerCAmelCase )
and (isinstance(lowerCAmelCase, (int, float) ))
)
return x * scalar + y
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
random.seed(lowerCAmelCase )
A = [random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )]
return Vector(lowerCAmelCase )
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : list[list[float]] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
A = matrix
A = w
A = h
def __str__( self : int ):
A = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Optional[Any] , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self : Dict , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : int , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Union[str, Any] , UpperCamelCase__ : Vector ):
...
def __mul__( self : Tuple , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # matrix-vector
if len(UpperCamelCase__ ) == self.__width:
A = zero_vector(self.__height )
for i in range(self.__height ):
A = [
self.__matrix[i][j] * other.component(UpperCamelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar
A = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase__ , self.__width , self.__height )
return None
def UpperCamelCase ( self : Optional[int] ):
return self.__height
def UpperCamelCase ( self : List[Any] ):
return self.__width
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
A = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
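        # remove row x and column y, then take the determinant of the remaining submatrix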
A = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase__ ) ):
A = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ )
else:
raise Exception('Indices out of bounds' )
def UpperCamelCase ( self : Tuple ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
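            # Laplace expansion along the first row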
A = [
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width )
]
return sum(UpperCamelCase__ )
def __UpperCamelCase (lowerCAmelCase : int ) -> Matrix:
A = [[0] * n for _ in range(lowerCAmelCase )]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Matrix:
random.seed(lowerCAmelCase )
A = [
[random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )
]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
| 699 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase ( self : str ):
A = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
A = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , )  # the French sentence "J'aime le camembert !" ("I love camembert!")
A = model(UpperCamelCase__ )['last_hidden_state']
A = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , UpperCamelCase__ )
# compare the actual values for a slice.
A = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 699 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any]=50265 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : int=8 , UpperCamelCase__ : Optional[int]=2048 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=8 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : int=16 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Any=512 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Dict=2 , **UpperCamelCase__ : List[str] , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
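            # past key/value shape: (batch, num_heads, seq_len, head_dim)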
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
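            # use a dummy past length a few tokens longer than the decoder input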
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
| 699 | 1 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase ( self : Optional[int] ):
A = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
A = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(UpperCamelCase__ )
from datasets import load_dataset
A = load_dataset('nielsr/rvlcdip-demo' )
A = dataset['train'][0]['image'].convert('RGB' )
A = image_processor(UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A = model(**UpperCamelCase__ )
A = outputs.logits
A = torch.Size((1, 16) )
self.assertEqual(logits.shape , UpperCamelCase__ )
A = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=UpperCamelCase__ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
| 699 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE : List[str] = '''BridgeTowerImageProcessor'''
SCREAMING_SNAKE_CASE : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
A = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
A = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def UpperCamelCase ( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Any ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 699 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE : List[str] = '''BridgeTowerImageProcessor'''
SCREAMING_SNAKE_CASE : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
A = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
A = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def UpperCamelCase ( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Any ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 699 |
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> str:
return "\n".join(
f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 699 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase (lowerCAmelCase : Dict, lowerCAmelCase : int, lowerCAmelCase : str ) -> List[Any]:
# Initialise PyTorch model
A = LxmertConfig.from_json_file(lowerCAmelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
A = LxmertForPreTraining(lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(), lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 699 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _UpperCAmelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , UpperCamelCase__ : int = 128 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : float = 2_000.0 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 2048 , UpperCamelCase__ : float = 0.1 , ):
super().__init__()
A = nn.Sequential(
nn.Linear(UpperCamelCase__ , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , )
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = False
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
# FiLM conditional T5 decoder
A = DecoderLayer(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
self.decoders.append(UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : int ):
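        # outer product of query and key padding masks -> (batch, 1, query_len, key_len)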
A = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
A , A , A = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A = self.conditioning_emb(UpperCamelCase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A = torch.broadcast_to(
torch.arange(UpperCamelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A = self.position_encoding(UpperCamelCase__ )
A = self.continuous_inputs_projection(UpperCamelCase__ )
inputs += position_encodings
A = self.dropout(UpperCamelCase__ )
# decoder: No padding present.
A = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A = [(x, self.encoder_decoder_mask(UpperCamelCase__ , UpperCamelCase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A = lyr(
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )[0]
A = self.decoder_norm(UpperCamelCase__ )
A = self.post_dropout(UpperCamelCase__ )
A = self.spec_out(UpperCamelCase__ )
return spec_out
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=1e-6 ):
super().__init__()
A = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ ) )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=None , ):
A = self.layer[0](
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
if encoder_hidden_states is not None:
            A = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
A = self.layer[1](
UpperCamelCase__ , key_value_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
# Apply Film Conditional Feed Forward layer
A = self.layer[-1](UpperCamelCase__ , UpperCamelCase__ )
return (hidden_states,)
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
super().__init__()
A = TaLayerNorm(UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , ):
# pre_self_attention_layer_norm
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.FiLMLayer(UpperCamelCase__ , UpperCamelCase__ )
# Self-attention block
A = self.attention(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
super().__init__()
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , ):
A = self.layer_norm(UpperCamelCase__ )
A = self.attention(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , )
A = hidden_states + self.dropout(UpperCamelCase__ )
return layer_output
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
super().__init__()
A = TaDenseGatedActDense(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=None ):
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.film(UpperCamelCase__ , UpperCamelCase__ )
A = self.DenseReluDense(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ):
super().__init__()
        A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )  # wi_0: gated projection
        A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )  # wi_1: linear projection
        A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )  # wo: output projection
A = nn.Dropout(UpperCamelCase__ )
A = NewGELUActivation()
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
        A = self.act(self.wi_0(UpperCamelCase__ ) )  # gated branch: GELU(wi_0(x))
        A = self.wi_1(UpperCamelCase__ )  # linear branch: wi_1(x), a projection distinct from wi_0
A = hidden_gelu * hidden_linear
A = self.dropout(UpperCamelCase__ )
A = self.wo(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=1e-6 ):
super().__init__()
A = nn.Parameter(torch.ones(UpperCamelCase__ ) )
A = eps
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : int ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
        A = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
A = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
A = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
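# Quick numeric sketch of the RMS normalization above (assuming a weight of ones):
#   x = torch.tensor([3.0, 4.0])   -> variance = mean(x**2) = 12.5
#   x * rsqrt(12.5 + eps)          -> approx [0.8485, 1.1314]
#   the result has root-mean-square ~1: only the scale is normalized, with no
#   mean subtraction and no bias.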
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def UpperCamelCase ( self : Any , UpperCamelCase__ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(UpperCamelCase__ , 3.0 )) ))
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , out_features * 2 , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ):
A = self.scale_bias(UpperCamelCase__ )
A , A = torch.chunk(UpperCamelCase__ , 2 , -1 )
A = x * (1 + scale) + shift
return x
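# Minimal sketch of the FiLM modulation above, reusing the torch/nn imports at
# the top of this file (the feature sizes are illustrative assumptions):
#   cond_proj = nn.Linear(16, 2 * 8)                 # conditioning dim 16 -> (scale, shift) for 8 features
#   x, cond = torch.randn(1, 8), torch.randn(1, 16)
#   scale, shift = torch.chunk(cond_proj(cond), 2, dim=-1)
#   y = x * (1 + scale) + shift                      # same affine modulation as the forward pass above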
| 699 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE : Optional[Any] = '''ViTImageProcessor'''
SCREAMING_SNAKE_CASE : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : str , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , **UpperCamelCase__ : int ):
A = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase__ , )
A = kwargs.pop('feature_extractor' )
A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : int , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Dict=None , **UpperCamelCase__ : int ):
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
A = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if visual_prompt is not None:
A = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
A = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if visual_prompt is not None and images is not None:
A = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
A = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
A = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : int ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Tuple ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase__ , )
return self.image_processor_class
@property
def UpperCamelCase ( self : str ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , UpperCamelCase__ , )
return self.image_processor
| 699 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
A = WATERMARK_BITS
A = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor ):
# can't encode images that are smaller than 256
if images.shape[-1] < 256:
return images
A = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A = [self.encoder.encode(UpperCamelCase__ , 'dwtDct' ) for image in images]
A = torch.from_numpy(np.array(UpperCamelCase__ ) ).permute(0 , 3 , 1 , 2 )
A = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
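# Usage sketch (variable names are illustrative; inputs follow the convention
# above of NCHW tensors in [-1, 1] that are at least 256 px wide):
#   watermarker = _UpperCAmelCase()                   # the watermark class above
#   images = torch.rand(2, 3, 512, 512) * 2 - 1       # fake decoded images
#   watermarked = watermarker.UpperCamelCase(images)  # returns images re-clamped to [-1, 1]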
| 699 | 1 |
import pprint
import requests
_UpperCAmelCase = "https://zenquotes.io/api"
def __UpperCamelCase () -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def __UpperCamelCase () -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
_UpperCAmelCase = random_quotes()
pprint.pprint(response)
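# Each endpoint returns a JSON list of quote objects; per the public
# zenquotes.io docs a typical element looks like (shown here as an assumption):
#   [{"q": "<quote text>", "a": "<author>", "h": "<html-formatted quote>"}]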
| 699 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : Dict, lowerCAmelCase : Optional[int], lowerCAmelCase : List[Any], lowerCAmelCase : str ) -> int:
for attribute in key.split('.' ):
A = getattr(lowerCAmelCase, lowerCAmelCase )
if weight_type is not None:
A = getattr(lowerCAmelCase, lowerCAmelCase ).shape
else:
A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Optional[int] ) -> Dict:
A = []
A = fairseq_model.state_dict()
A = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, hf_model.config.feat_extract_norm == 'group', )
A = True
else:
for key, mapped_key in MAPPING.items():
A = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
A = True
if "*" in mapped_key:
A = name.split(lowerCAmelCase )[0].split('.' )[-2]
A = mapped_key.replace('*', lowerCAmelCase )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A = 'weight'
else:
A = None
set_recursively(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : List[Any], lowerCAmelCase : int ) -> Dict:
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Union[str, Any]=None, lowerCAmelCase : str=None, lowerCAmelCase : List[Any]=True ) -> Union[str, Any]:
if config_path is not None:
A = UniSpeechSatConfig.from_pretrained(lowerCAmelCase )
else:
A = UniSpeechSatConfig()
A = ''
if is_finetuned:
A = UniSpeechSatForCTC(lowerCAmelCase )
else:
A = UniSpeechSatForPreTraining(lowerCAmelCase )
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
A = model[0].eval()
recursively_load_weights(lowerCAmelCase, lowerCAmelCase )
hf_wavavec.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
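# Example invocation (a sketch; the script name and all paths are hypothetical):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path ./unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech_sat_hf \
#       --config_path ./config.json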
| 699 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : List[Any] ) -> int:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
A = flax_key_tuple[:-1] + ('weight',)
A = torch.permute(lowerCAmelCase, (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCAmelCase ):
# linear layer
A = flax_key_tuple[:-1] + ('weight',)
A = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
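# Worked example of the renaming above (key names and shapes are illustrative):
#   an expert kernel key ("mlp", "wi", "kernel") with a 3D tensor of shape
#   (num_experts, d_in, d_out) becomes ("mlp", "wi", "weight") with the last
#   two axes permuted to (num_experts, d_out, d_in), matching torch.nn.Linear's
#   (out_features, in_features) weight layout.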
def __UpperCamelCase (lowerCAmelCase : List[Any], lowerCAmelCase : List[str], lowerCAmelCase : List[Any] ) -> int:
if "metadata" in layer:
A = layer.split('metadata' )
A = ''.join(split_layer[0] )[:-1]
A = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
A = layer.split('kvstore' )
A = ''.join(split_layer[0] )[:-1]
A = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
A = layer.split('/' )
A = '/'.join(split_layer[:-1] )
A = (split_layer[-1],)
if "kvstore/path" in layer:
A = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
A = 'file'
else:
A = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __UpperCamelCase (lowerCAmelCase : Any, lowerCAmelCase : Union[str, Any] ) -> Tuple:
A = rename_keys(lowerCAmelCase )
A = {}
for k, v in current_block.items():
A = v
A = new_current_block
torch.save(lowerCAmelCase, lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : Any, lowerCAmelCase : List[str], lowerCAmelCase : str, lowerCAmelCase : Optional[Any], lowerCAmelCase : str = WEIGHTS_NAME ) -> List[Any]:
A = convert_file_size_to_int(lowerCAmelCase )
A = []
A = {}
A = 0
A = 0
os.makedirs(lowerCAmelCase, exist_ok=lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + '/checkpoint', 'rb' ) as fp:
A = serialization.msgpack_restore(fp.read() )['optimizer']['target']
A = flatten_dict(lowerCAmelCase, sep='/' )
A = {}
for layer in checkpoint_info.keys():
A , A , A = get_key_and_tensorstore_dict(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
if curr_real_layer_name in all_layers:
A = content
else:
A = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A = torch.tensor(lowerCAmelCase )
A = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A , A = rename_base_flax_keys(tuple(key.split('/' ) ), lowerCAmelCase )
A = '/'.join(lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
A = os.path.join(
lowerCAmelCase, weights_name.replace('.bin', f'''-{len(lowerCAmelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowerCAmelCase, lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
A = {}
A = 0
A = raw_weights.to(getattr(lowerCAmelCase, lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A = os.path.join(lowerCAmelCase, weights_name.replace('.bin', f'''-{len(lowerCAmelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowerCAmelCase, lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A = {}
A = {}
for idx, shard in enumerate(lowerCAmelCase ):
A = weights_name.replace(
'.bin', f'''-{idx+1:05d}-of-{len(lowerCAmelCase ):05d}.bin''' ) # len(sharded_state_dicts):05d}
A = os.path.join(lowerCAmelCase, weights_name.replace('.bin', f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(lowerCAmelCase, os.path.join(lowerCAmelCase, lowerCAmelCase ) )
A = shard
for key in shard:
A = shard_file
# Add the metadata
A = {'total_size': total_size}
A = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(lowerCAmelCase, lowerCAmelCase ), 'w', encoding='utf-8' ) as f:
A = json.dumps(lowerCAmelCase, indent=2, sort_keys=lowerCAmelCase ) + '\n'
f.write(lowerCAmelCase )
return metadata, index
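# The saved index follows the standard sharded-checkpoint layout, e.g.
# (parameter and file names below are illustrative):
#   {"metadata": {"total_size": 123456789},
#    "weight_map": {"<param name>": "pytorch_model-00001-of-00072.bin", ...}}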
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
_UpperCAmelCase = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __UpperCamelCase () -> str:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
A = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
A = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted', device_map='auto' )
A = TaTokenizer.from_pretrained('t5-small' )
A = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
A = tokenizer(lowerCAmelCase, return_tensors='pt' ).input_ids
A = model.generate(lowerCAmelCase, decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 699 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase = TypeVar("T")
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : T ):
A = data
A = None
def __str__( self : Optional[int] ):
return f'''{self.data}'''
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple ):
A = None
def __iter__( self : int ):
A = self.top
while node:
yield node.data
A = node.next
def __str__( self : Any ):
return "->".join([str(UpperCamelCase__ ) for item in self] )
def __len__( self : Dict ):
return len(tuple(iter(self ) ) )
def UpperCamelCase ( self : List[str] ):
return self.top is None
def UpperCamelCase ( self : Dict , UpperCamelCase__ : T ):
A = Node(UpperCamelCase__ )
if not self.is_empty():
A = self.top
A = node
def UpperCamelCase ( self : Dict ):
if self.is_empty():
raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
A = self.top
A = self.top.next
return pop_node.data
def UpperCamelCase ( self : List[str] ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def UpperCamelCase ( self : List[str] ):
A = None
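# Example behaviour of the stack above (method names are collapsed by this
# file's naming; shown with the intended push/pop/peek semantics):
#   push(1); push(2)   ->  str(stack) == "2->1"
#   pop() == 2, then peek() == 1 and len(stack) == 1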
if __name__ == "__main__":
from doctest import testmod
testmod()
| 699 | 1 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __UpperCamelCase (lowerCAmelCase : Union[str, Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : Dict, lowerCAmelCase : Dict ) -> Dict:
A = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
A = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
A = f'''{src_lang}-{tgt_lang}'''
A = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=lowerCAmelCase, exist_ok=lowerCAmelCase )
A = os.path.join(lowerCAmelCase, 'README.md' )
print(f'''Generating {path}''' )
with open(lowerCAmelCase, 'w', encoding='utf-8' ) as f:
f.write(lowerCAmelCase )
# make sure we are under the root of the project
_UpperCAmelCase = Path(__file__).resolve().parent.parent.parent
_UpperCAmelCase = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
_UpperCAmelCase = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 699 |
from __future__ import annotations
import math
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : bool, lowerCAmelCase : list[int], lowerCAmelCase : float ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1, node_index * 2, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), minimax(depth + 1, node_index * 2 + 1, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), )
if is_max
else min(
minimax(depth + 1, node_index * 2, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), minimax(depth + 1, node_index * 2 + 1, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), )
)
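# Worked example for the scores used in main():
#   leaves = [90, 23, 6, 33, 21, 65, 123, 34423], height = log2(8) = 3
#   depth 2 (max over pairs): [90, 33, 65, 34423]
#   depth 1 (min over pairs): [33, 65]
#   depth 0 (max, the root):  65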
def __UpperCamelCase () -> None:
A = [90, 23, 6, 33, 21, 65, 123, 34_423]
A = math.log(len(lowerCAmelCase ), 2 )
print(f'''Optimal value : {minimax(0, 0, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 699 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = SpeechTaTokenizer
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Tuple = True
def UpperCamelCase ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
A = SpeechTaTokenizer(UpperCamelCase__ )
A = AddedToken('<mask>' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ )
A = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
A = 'this is a test'
A = 'this is a test'
return input_text, output_text
def UpperCamelCase ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Any=20 , UpperCamelCase__ : List[str]=5 ):
A , A = self.get_input_output_texts(UpperCamelCase__ )
A = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
A = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
return text, ids
def UpperCamelCase ( self : int ):
A = '<pad>'
A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCamelCase ( self : int ):
A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(UpperCamelCase__ ) , 81 )
def UpperCamelCase ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def UpperCamelCase ( self : Optional[Any] ):
A = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
A = tokenizer.vocab_size
A = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A = ['aaaaa bbbbbb', 'cccccccccdddddddd']
A = tokenizer.add_tokens(UpperCamelCase__ )
A = tokenizer.vocab_size
A = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , len(UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , all_size + len(UpperCamelCase__ ) )
A = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=UpperCamelCase__ )
self.assertGreaterEqual(len(UpperCamelCase__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
A = tokenizer.add_special_tokens(UpperCamelCase__ )
A = tokenizer.vocab_size
A = len(UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , 0 )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , len(UpperCamelCase__ ) )
self.assertEqual(UpperCamelCase__ , all_size_a + len(UpperCamelCase__ ) )
A = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=UpperCamelCase__ )
self.assertGreaterEqual(len(UpperCamelCase__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def UpperCamelCase ( self : List[Any] ):
pass
def UpperCamelCase ( self : Tuple ):
pass
def UpperCamelCase ( self : Optional[Any] ):
A = self.get_tokenizer()
A = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(UpperCamelCase__ , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCamelCase__ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
A = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
# fmt: off
self.assertListEqual(UpperCamelCase__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
A = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def UpperCamelCase ( self : Any ):
# Use custom sequence because this tokenizer does not handle numbers.
A = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
A = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=UpperCamelCase__ , )
| 699 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : PreTrainedTokenizer, lowerCAmelCase : int, lowerCAmelCase : Optional[int] = None, ) -> Dict:
A = {}
if train_file is not None:
A = [train_file]
if eval_file is not None:
A = [eval_file]
if test_file is not None:
A = [test_file]
A = datasets.load_dataset('csv', data_files=lowerCAmelCase )
A = list(ds[list(files.keys() )[0]].features.keys() )
A = features_name.pop(lowerCAmelCase )
A = list(set(ds[list(files.keys() )[0]][label_name] ) )
A = {label: i for i, label in enumerate(lowerCAmelCase )}
A = tokenizer.model_input_names
A = {}
if len(lowerCAmelCase ) == 1:
for k in files.keys():
A = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]], truncation=lowerCAmelCase, max_length=lowerCAmelCase, padding='max_length' ), batched=lowerCAmelCase, )
elif len(lowerCAmelCase ) == 2:
for k in files.keys():
A = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]), truncation=lowerCAmelCase, max_length=lowerCAmelCase, padding='max_length', ), batched=lowerCAmelCase, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
A = (
tf.data.Dataset.from_generator(
            lowerCAmelCase, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
A = (
tf.data.Dataset.from_generator(
            lowerCAmelCase, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
A = (
tf.data.Dataset.from_generator(
            lowerCAmelCase, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
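# The loader above expects headered CSV files; a minimal sketch (column names
# are hypothetical, with the label column selected by label_column_id):
#   sentence,label
#   "this movie was great",positive
#   "terrible pacing",negative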
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = field(metadata={'''help''': '''Which column contains the label'''} )
SCREAMING_SNAKE_CASE : str = field(default=__lowercase , metadata={'''help''': '''The path of the training file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the development file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the test file'''} )
SCREAMING_SNAKE_CASE : int = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : bool = field(default=__lowercase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def __UpperCamelCase () -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
A , A , A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
A , A , A , A = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=lowerCAmelCase, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(lowerCAmelCase ), labelaid=lowerCAmelCase, idalabel={id: label for label, id in labelaid.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path ), config=lowerCAmelCase, cache_dir=model_args.cache_dir, )
def compute_metrics(lowerCAmelCase : EvalPrediction ) -> Dict:
A = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A = TFTrainer(
model=lowerCAmelCase, args=lowerCAmelCase, train_dataset=lowerCAmelCase, eval_dataset=lowerCAmelCase, compute_metrics=lowerCAmelCase, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A = trainer.evaluate()
A = os.path.join(training_args.output_dir, 'eval_results.txt' )
with open(lowerCAmelCase, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(lowerCAmelCase )
return results
if __name__ == "__main__":
main()
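# Example invocation (a sketch; file paths and the model name are illustrative):
#   python run_tf_text_classification.py \
#       --train_file ./train.csv --dev_file ./dev.csv --test_file ./test.csv \
#       --label_column_id 1 --model_name_or_path bert-base-uncased \
#       --output_dir ./model --do_train --do_eval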
| 699 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 699 |
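The `_LazyModule` registered above means importing the package only records which names live in which submodule; the heavy torch, TF, or Flax modules load on first attribute access. A rough sketch of the mechanism (simplified; the real `_LazyModule` also handles failed backends and `module_spec`):

import importlib
import types

class LazyModule(types.ModuleType):
    # Resolve attributes to submodule members on first access.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # e.g. map "XGLMConfig" -> "configuration_xglm"
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)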
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path : str, config_file : str, pytorch_dump_path : str ) -> None:
# Initialise PyTorch model
config = T5Config.from_json_file(config_file )
print(f'''Building PyTorch model from configuration: {config}''' )
model = T5ForConditionalGeneration(config )
# Load weights from tf checkpoint
load_tf_weights_in_t5(model, config, tf_checkpoint_path )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 699 |
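Driven by argparse, the script forwards the three required paths straight into the conversion function; a hypothetical programmatic call looks like this (the paths are placeholders, not real checkpoints):

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/tmp/t5_tf/model.ckpt",
    config_file="/tmp/t5_tf/config.json",
    pytorch_dump_path="/tmp/t5_pt",
)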
def actual_power(a : int, b : int ) -> int:
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(a, int(b / 2 ) ) * actual_power(a, int(b / 2 ) )
else:
return a * actual_power(a, int(b / 2 ) ) * actual_power(a, int(b / 2 ) )
def power(a : int, b : int ) -> float:
if b < 0:
return 1 / actual_power(a, b )
return actual_power(a, b )
if __name__ == "__main__":
print(power(-2, -3))
| 699 | 1 |
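A few spot checks of the two functions; note that the negative-exponent branch still terminates because `int(b / 2)` truncates toward zero:

assert power(2, 3) == 8
assert power(5, 0) == 1
assert power(2, -3) == 0.125    # 1 / 2**3
assert power(-2, -3) == -0.125  # 1 / (-2)**3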
def heaps(arr : list ) -> list:
if len(arr ) <= 1:
return [tuple(arr )]
res = []
def generate(k : int, arr : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1, arr )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
arr[i], arr[k - 1] = arr[k - 1], arr[i]
else: # k is odd
arr[0], arr[k - 1] = arr[k - 1], arr[0]
generate(k - 1, arr )
generate(len(arr ), arr )
return res
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 699 |
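Heap's algorithm generates each permutation from the previous one with a single swap; a quick sanity check on a three-element input:

perms = heaps([1, 2, 3])
assert len(perms) == 6        # 3! permutations
assert len(set(perms)) == 6   # all distinct
assert (1, 2, 3) in perms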
def max_product_subarray(numbers : list[int] ) -> int:
if not numbers:
return 0
if not isinstance(numbers, (list, tuple) ) or not all(
isinstance(number, int ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
max_till_now = min_till_now = max_prod = numbers[0]
for i in range(1, len(numbers ) ):
# update the maximum and minimum subarray products
number = numbers[i]
if number < 0:
max_till_now, min_till_now = min_till_now, max_till_now
max_till_now = max(number, max_till_now * number )
min_till_now = min(number, min_till_now * number )
# update the maximum product found till now
max_prod = max(max_prod, max_till_now )
return max_prod
| 699 | 1 |
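Two classic checks: a negative element can turn the running minimum into the new maximum (hence the swap above), and a zero resets both trackers:

assert max_product_subarray([2, 3, -2, 4]) == 6    # best subarray is [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0      # zero beats any negative product
assert max_product_subarray([-2, -3, 4]) == 24     # the whole array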
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None ):
parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False )
# The main config parser
config_parser = config_command_parser(subparsers )
# The subparser to add commands to
subcommands = config_parser.add_subparsers(title='subcommands', dest='subcommand' )
# Then add other parsers with the parent parser
default_command_parser(subcommands, parents=[parent_parser] )
update_command_parser(subcommands, parents=[parent_parser] )
return config_parser
def main() -> None:
config_parser = get_config_parser()
args = config_parser.parse_args()
if not hasattr(args, 'func' ):
config_parser.print_help()
exit(1 )
# Run
args.func(args )
if __name__ == "__main__":
main()
| 699 |
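The dispatch relies on each subcommand parser installing a `func` default that `main` then calls with the parsed namespace. A minimal standalone sketch of the same pattern (the subcommand and handler are invented for illustration):

import argparse

def _hello(args):
    print(f"hello, {args.name}")

parser = argparse.ArgumentParser(prog="demo")
subparsers = parser.add_subparsers(title="subcommands", dest="subcommand")
hello_parser = subparsers.add_parser("hello")
hello_parser.add_argument("--name", default="world")
hello_parser.set_defaults(func=_hello)

args = parser.parse_args(["hello", "--name", "config"])
args.func(args)  # prints: hello, config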
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 699 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
_UpperCAmelCase = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_UpperCAmelCase = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_UpperCAmelCase = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_UpperCAmelCase = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_UpperCAmelCase = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_UpperCAmelCase = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_UpperCAmelCase = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_UpperCAmelCase = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_UpperCAmelCase = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_UpperCAmelCase = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_UpperCAmelCase = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_UpperCAmelCase = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_UpperCAmelCase = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_UpperCAmelCase = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_UpperCAmelCase = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
_UpperCAmelCase = api.list_models(filter="diffusers")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
_UpperCAmelCase = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("CompVis"):
_UpperCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet")
else:
_UpperCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
_UpperCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
_UpperCAmelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
_UpperCAmelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
| 699 |
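Each checkpoint above is verified by seeding the RNGs, running a single denoising step, and comparing the first 30 output logits against stored reference values; `torch.allclose` with `atol=1e-3` tolerates small numerical drift. The comparison reduces to:

import torch

reference = torch.tensor([0.1000, -0.2000, 0.3000])  # stored expected slice
observed = torch.tensor([0.1004, -0.1998, 0.2995])   # fresh model output

assert torch.allclose(observed, reference, atol=1e-3)  # every element within 1e-3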
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig ):
'''simple docstring'''
model_type = 'biogpt'
def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.scale_embedding = scale_embedding
self.use_cache = use_cache
self.layerdrop = layerdrop
self.activation_dropout = activation_dropout
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 699 | 1 |
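A hypothetical round trip with the config: constructor keyword arguments land on attributes and serialize back out through the `PretrainedConfig` machinery:

config = BioGptConfig(num_hidden_layers=12, hidden_size=512)
assert config.num_hidden_layers == 12
assert config.to_dict()["hidden_size"] == 512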
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size : int, device : str ):
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True ).raw ).convert('RGB' )
transform = transforms.Compose(
[
transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711) ),
] )
image = transform(raw_image ).unsqueeze(0 ).to(device )
return image
def rename_key(key : str ) -> str:
if "visual_encoder" in key:
key = re.sub('visual_encoder*', 'vision_model.encoder', key )
if "blocks" in key:
key = re.sub(r'blocks', 'layers', key )
if "attn" in key:
key = re.sub(r'attn', 'self_attn', key )
if "norm1" in key:
key = re.sub(r'norm1', 'layer_norm1', key )
if "norm2" in key:
key = re.sub(r'norm2', 'layer_norm2', key )
if "encoder.norm" in key:
key = re.sub(r'encoder.norm', 'post_layernorm', key )
if "encoder.patch_embed.proj" in key:
key = re.sub(r'encoder.patch_embed.proj', 'embeddings.patch_embedding', key )
if "encoder.pos_embed" in key:
key = re.sub(r'encoder.pos_embed', 'embeddings.position_embedding', key )
if "encoder.cls_token" in key:
key = re.sub(r'encoder.cls_token', 'embeddings.class_embedding', key )
if "self_attn" in key:
key = re.sub(r'self_attn.proj', 'self_attn.projection', key )
return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path : str, config_path : str = None ) -> None:
if config_path is not None:
config = BlipConfig.from_pretrained(config_path )
else:
config = BlipConfig(projection_dim=512, text_config={}, vision_config={} )
hf_model = BlipForConditionalGeneration(config ).eval()
model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='base' )
pt_model = pt_model.eval()
modified_state_dict = pt_model.state_dict()
for key in modified_state_dict.copy():
value = modified_state_dict.pop(key )
renamed_key = rename_key(key )
modified_state_dict[renamed_key] = value
hf_model.load_state_dict(modified_state_dict )
image_size = 384
image = load_demo_image(image_size=image_size, device='cpu' )
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
input_ids = tokenizer(['a picture of'] ).input_ids
out = hf_model.generate(image, input_ids )
assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
out = hf_model.generate(image )
assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(pytorch_dump_folder_path )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
model_url = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='base' )
vqa_model.eval()
modified_state_dict = vqa_model.state_dict()
for key in modified_state_dict.copy():
value = modified_state_dict.pop(key )
renamed_key = rename_key(key )
modified_state_dict[renamed_key] = value
hf_vqa_model = BlipForQuestionAnswering(config )
hf_vqa_model.load_state_dict(modified_state_dict )
question = ['How many dogs are in this image?']
question_input_ids = tokenizer(question, return_tensors='pt' ).input_ids
answer = hf_vqa_model.generate(question_input_ids, image )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='base' )
itm_model.eval()
modified_state_dict = itm_model.state_dict()
for key in modified_state_dict.copy():
value = modified_state_dict.pop(key )
renamed_key = rename_key(key )
modified_state_dict[renamed_key] = value
hf_itm_model = BlipForImageTextRetrieval(config )
question = ['A picture of a woman with a dog sitting in a beach']
question_input_ids = tokenizer(
question, return_tensors='pt', padding='max_length', truncation=True, max_length=35, ).input_ids
hf_itm_model.load_state_dict(modified_state_dict )
hf_itm_model.eval()
out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True )
out = hf_itm_model(question_input_ids, image, use_itm_head=False )
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0], dim=1 )[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
args = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 699 |
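Traced by hand through the substitution rules above, a typical BLIP state-dict key (hypothetical, for illustration) is rewritten as follows:

key = "visual_encoder.blocks.0.attn.proj.weight"
print(rename_key(key))
# -> vision_model.encoder.layers.0.self_attn.projection.weight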
import sys
def matrix_chain_order(array : list ) -> tuple:
n = len(array )
matrix = [[0 for x in range(n )] for x in range(n )]
sol = [[0 for x in range(n )] for x in range(n )]
for chain_length in range(2, n ):
for a in range(1, n - chain_length + 1 ):
b = a + chain_length - 1
matrix[a][b] = sys.maxsize
for c in range(a, b ):
cost = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
matrix[a][b] = cost
sol[a][b] = c
return matrix, sol
def print_optimal_solution(optimal_solution : list, i : int, j : int ) -> None:
if i == j:
print('A' + str(i ), end=' ' )
else:
print('(', end=' ' )
print_optimal_solution(optimal_solution, i, optimal_solution[i][j] )
print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j )
print(')', end=' ' )
def main() -> None:
array = [30, 35, 15, 5, 10, 20, 25]
n = len(array )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
matrix, sol = matrix_chain_order(array )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
print_optimal_solution(sol, 1, n - 1 )
if __name__ == "__main__":
main()
| 699 | 1 |
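For the dimension array `[30, 35, 15, 5, 10, 20, 25]` the expected answer is the classic CLRS one, which doubles as a regression check:

matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert matrix[1][6] == 15125  # optimal scalar multiplications (CLRS 15.2)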
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_UpperCAmelCase = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
class SentencesToListOfCharacters(tr.AbstractTransform ):
'''simple docstring'''
def __init__( self , sentence_delimiter : str = " " ):
self.sentence_delimiter = sentence_delimiter
def process_string( self , s : str ):
return list(s )
def process_list( self , inp : List[str] ):
chars = []
for sent_idx, sentence in enumerate(inp ):
chars.extend(self.process_string(sentence ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp ) - 1:
chars.append(self.sentence_delimiter )
return chars
cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_UpperCAmelCase = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_UpperCAmelCase = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_UpperCAmelCase = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric ):
'''simple docstring'''
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
def _compute( self , predictions , references , concatenate_texts=False ):
if concatenate_texts:
return jiwer.compute_measures(
references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
incorrect = 0
total = 0
for prediction, reference in zip(predictions , references ):
measures = jiwer.compute_measures(
reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 699 |
from math import isqrt
def is_prime(number : int ) -> bool:
return all(number % divisor != 0 for divisor in range(2, isqrt(number ) + 1 ) )
def solution(max_prime : int = 10**6 ) -> int:
primes_count = 0
cube_index = 1
prime_candidate = 7
while prime_candidate < max_prime:
primes_count += is_prime(prime_candidate )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 699 | 1 |
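The candidate sequence 7, 19, 37, … enumerates the differences of consecutive cubes, (k+1)^3 - k^3 = 3k^2 + 3k + 1; a few spot checks of both functions:

assert is_prime(7) and is_prime(19) and is_prime(37)
assert not is_prime(91)    # 7 * 13
assert solution(100) == 4  # 7, 19, 37, 61 are prime; 91 is not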
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 699 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor ):
'''simple docstring'''
def __init__( self , *args , **kwargs ):
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , FutureWarning , )
super().__init__(*args , **kwargs )
| 699 | 1 |
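The same thin-subclass pattern works for deprecating any class in place: keep the old name importable, warn on construction, and delegate everything else. A generic sketch (names invented):

import warnings

class NewProcessor:
    def __init__(self, scale=1.0):
        self.scale = scale

class OldProcessor(NewProcessor):
    # Deprecated alias kept only for backward compatibility.
    def __init__(self, *args, **kwargs):
        warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)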
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 699 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module ):
'''simple docstring'''
def __init__( self , dim : int , num_attention_heads : int , attention_head_dim : int , dropout : float = 0.0 , cross_attention_dim : Optional[int] = None , activation_fn : str = "geglu" , num_embeds_ada_norm : Optional[int] = None , attention_bias : bool = False , only_cross_attention : bool = False , double_self_attention : bool = False , upcast_attention : bool = False , norm_elementwise_affine : bool = True , norm_type : str = "layer_norm" , final_dropout : bool = False , ):
super().__init__()
self.only_cross_attention = only_cross_attention
self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
self.norm1 = AdaLayerNorm(dim , num_embeds_ada_norm )
elif self.use_ada_layer_norm_zero:
self.norm1 = AdaLayerNormZero(dim , num_embeds_ada_norm )
else:
self.norm1 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
self.attn1 = Attention(
query_dim=dim , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=upcast_attention , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
self.norm2 = (
AdaLayerNorm(dim , num_embeds_ada_norm )
if self.use_ada_layer_norm
else nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
)
self.attn2 = Attention(
query_dim=dim , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=num_attention_heads , dim_head=attention_head_dim , dropout=dropout , bias=attention_bias , upcast_attention=upcast_attention , ) # is self-attn if encoder_hidden_states is none
else:
self.norm2 = None
self.attn2 = None
# 3. Feed-forward
self.norm3 = nn.LayerNorm(dim , elementwise_affine=norm_elementwise_affine )
self.ff = FeedForward(dim , dropout=dropout , activation_fn=activation_fn , final_dropout=final_dropout )
# let chunk size default to None
self._chunk_size = None
self._chunk_dim = 0
A = 0
def set_chunk_feed_forward( self , chunk_size : Optional[int] , dim : int ):
# Sets chunk feed-forward
self._chunk_size = chunk_size
self._chunk_dim = dim
def forward( self , hidden_states : torch.FloatTensor , attention_mask : Optional[torch.FloatTensor] = None , encoder_hidden_states : Optional[torch.FloatTensor] = None , encoder_attention_mask : Optional[torch.FloatTensor] = None , timestep : Optional[torch.LongTensor] = None , cross_attention_kwargs : Dict[str, Any] = None , class_labels : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
norm_hidden_states = self.norm1(hidden_states , timestep )
elif self.use_ada_layer_norm_zero:
norm_hidden_states , gate_msa , shift_mlp , scale_mlp , gate_mlp = self.norm1(
hidden_states , timestep , class_labels , hidden_dtype=hidden_states.dtype )
else:
norm_hidden_states = self.norm1(hidden_states )
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
attn_output = self.attn1(
norm_hidden_states , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=attention_mask , **cross_attention_kwargs , )
if self.use_ada_layer_norm_zero:
attn_output = gate_msa.unsqueeze(1 ) * attn_output
hidden_states = attn_output + hidden_states
# 2. Cross-Attention
if self.attn2 is not None:
norm_hidden_states = (
self.norm2(hidden_states , timestep ) if self.use_ada_layer_norm else self.norm2(hidden_states )
)
attn_output = self.attn2(
norm_hidden_states , encoder_hidden_states=encoder_hidden_states , attention_mask=encoder_attention_mask , **cross_attention_kwargs , )
hidden_states = attn_output + hidden_states
# 3. Feed-forward
norm_hidden_states = self.norm3(hidden_states )
if self.use_ada_layer_norm_zero:
norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
ff_output = torch.cat(
[self.ff(hid_slice ) for hid_slice in norm_hidden_states.chunk(num_chunks , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
ff_output = self.ff(norm_hidden_states )
if self.use_ada_layer_norm_zero:
ff_output = gate_mlp.unsqueeze(1 ) * ff_output
hidden_states = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module ):
'''simple docstring'''
def __init__( self , dim : int , dim_out : Optional[int] = None , mult : int = 4 , dropout : float = 0.0 , activation_fn : str = "geglu" , final_dropout : bool = False , ):
super().__init__()
inner_dim = int(dim * mult )
dim_out = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
act_fn = GELU(dim , inner_dim )
if activation_fn == "gelu-approximate":
act_fn = GELU(dim , inner_dim , approximate='tanh' )
elif activation_fn == "geglu":
act_fn = GEGLU(dim , inner_dim )
elif activation_fn == "geglu-approximate":
act_fn = ApproximateGELU(dim , inner_dim )
self.net = nn.ModuleList([] )
# project in
self.net.append(act_fn )
# project dropout
self.net.append(nn.Dropout(dropout ) )
# project out
self.net.append(nn.Linear(inner_dim , dim_out ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(dropout ) )
def forward( self , hidden_states ):
for module in self.net:
hidden_states = module(hidden_states )
return hidden_states
class GELU(nn.Module ):
'''simple docstring'''
def __init__( self , dim_in : int , dim_out : int , approximate : str = "none" ):
super().__init__()
self.proj = nn.Linear(dim_in , dim_out )
self.approximate = approximate
def gelu( self , gate ):
if gate.device.type != "mps":
return F.gelu(gate , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
def forward( self , hidden_states ):
hidden_states = self.proj(hidden_states )
hidden_states = self.gelu(hidden_states )
return hidden_states
class GEGLU(nn.Module ):
'''simple docstring'''
def __init__( self , dim_in : int , dim_out : int ):
super().__init__()
self.proj = nn.Linear(dim_in , dim_out * 2 )
def gelu( self , gate ):
if gate.device.type != "mps":
return F.gelu(gate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def forward( self , hidden_states ):
hidden_states , gate = self.proj(hidden_states ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(gate )
class ApproximateGELU(nn.Module ):
'''simple docstring'''
def __init__( self , dim_in : int , dim_out : int ):
super().__init__()
self.proj = nn.Linear(dim_in , dim_out )
def forward( self , x ):
x = self.proj(x )
return x * torch.sigmoid(1.702 * x )
class AdaLayerNorm(nn.Module ):
'''simple docstring'''
def __init__( self , embedding_dim : int , num_embeddings : int ):
super().__init__()
self.emb = nn.Embedding(num_embeddings , embedding_dim )
self.silu = nn.SiLU()
self.linear = nn.Linear(embedding_dim , embedding_dim * 2 )
self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False )
def forward( self , x , timestep ):
emb = self.linear(self.silu(self.emb(timestep ) ) )
scale , shift = torch.chunk(emb , 2 )
x = self.norm(x ) * (1 + scale) + shift
return x
class AdaLayerNormZero(nn.Module ):
'''simple docstring'''
def __init__( self , embedding_dim : int , num_embeddings : int ):
super().__init__()
self.emb = CombinedTimestepLabelEmbeddings(num_embeddings , embedding_dim )
self.silu = nn.SiLU()
self.linear = nn.Linear(embedding_dim , 6 * embedding_dim , bias=True )
self.norm = nn.LayerNorm(embedding_dim , elementwise_affine=False , eps=1e-6 )
def forward( self , x , timestep , class_labels , hidden_dtype=None ):
emb = self.linear(self.silu(self.emb(timestep , class_labels , hidden_dtype=hidden_dtype ) ) )
shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
x = self.norm(x ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module ):
'''simple docstring'''
def __init__( self , embedding_dim : int , out_dim : int , num_groups : int , act_fn : Optional[str] = None , eps : float = 1e-5 ):
super().__init__()
self.num_groups = num_groups
self.eps = eps
if act_fn is None:
self.act = None
else:
self.act = get_activation(act_fn )
self.linear = nn.Linear(embedding_dim , out_dim * 2 )
def forward( self , x , emb ):
if self.act:
emb = self.act(emb )
emb = self.linear(emb )
emb = emb[:, :, None, None]
scale , shift = emb.chunk(2 , dim=1 )
x = F.group_norm(x , self.num_groups , eps=self.eps )
x = x * (1 + scale) + shift
return x
| 699 | 1 |
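Because every module on the feed-forward path acts on each sequence position independently, chunking along the sequence dimension changes nothing numerically; it only bounds peak activation memory. A quick equivalence check:

import torch
import torch.nn as nn

ff = nn.Sequential(nn.Linear(8, 32), nn.GELU(), nn.Linear(32, 8))
x = torch.randn(2, 6, 8)  # (batch, seq, dim)

full = ff(x)
chunked = torch.cat([ff(chunk) for chunk in x.chunk(3, dim=1)], dim=1)
assert torch.allclose(full, chunked, atol=1e-6)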
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_UpperCAmelCase = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
'''simple docstring'''
def __call__( self , questions , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
elif titles is None or texts is None:
text_pair = titles if texts is None else texts
return super().__call__(
questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
titles = titles if not isinstance(titles , str ) else [titles]
texts = texts if not isinstance(texts , str ) else [texts]
n_passages = len(titles )
questions = questions if not isinstance(questions , str ) else [questions] * n_passages
assert len(titles ) == len(
texts ), f'''There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.'''
encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['input_ids']
encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['input_ids']
encoded_inputs = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
]
}
if return_attention_mask is not False:
attention_mask = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
encoded_inputs['attention_mask'] = attention_mask
return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
def decode_best_spans( self , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ):
input_ids = reader_input['input_ids']
start_logits , end_logits , relevance_logits = reader_output[:3]
n_passages = len(relevance_logits )
sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
nbest_spans_predictions = []
for doc_id in sorted_docs:
sequence_ids = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
sequence_len = sequence_ids.index(self.pad_token_id )
else:
sequence_len = len(sequence_ids )
best_spans = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(nbest_spans_predictions ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase ( self : Dict , UpperCamelCase__ : List[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , ):
A = []
for start_index, start_score in enumerate(UpperCamelCase__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        A = sorted(UpperCamelCase__ , key=lambda x : x[1] , reverse=UpperCamelCase__ )
A = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
A = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCamelCase__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__lowercase )
class _UpperCAmelCase ( __lowercase , __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : int = READER_PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Optional[Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Optional[Any] = READER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Tuple = ['''input_ids''', '''attention_mask''']
SCREAMING_SNAKE_CASE : List[Any] = DPRReaderTokenizer
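# Illustrative sketch (not part of the original class): the reader call above lays
# each example out as question+title ids followed by passage ids, truncated to
# max_length when truncation is requested. The ids below are made-up placeholders.
def _combine_reader_ids(question_and_title_ids, text_ids, max_length=None, truncation=True):
    combined = question_and_title_ids + text_ids
    if max_length is not None and truncation:
        combined = combined[:max_length]
    # before padding every position is real, so the mask is all ones
    attention_mask = [1] * len(combined)
    return combined, attention_mask

# _combine_reader_ids([101, 7, 8, 102, 9, 102], [10, 11, 102], max_length=8)
# -> ([101, 7, 8, 102, 9, 102, 10, 11], [1, 1, 1, 1, 1, 1, 1, 1])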
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
_UpperCAmelCase = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
_UpperCAmelCase = "</w>"
_UpperCAmelCase = "@@ "
def __UpperCamelCase (lowerCAmelCase : Optional[int] ) -> List[str]:
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
return pairs
# Speech2Text2 has no max input length
_UpperCAmelCase = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : str="<pad>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : List[str]=None , **UpperCamelCase__ : Optional[int] , ):
super().__init__(
unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , **UpperCamelCase__ , )
A = do_lower_case
with open(UpperCamelCase__ , encoding='utf-8' ) as vocab_handle:
A = json.load(UpperCamelCase__ )
A = {v: k for k, v in self.encoder.items()}
if merges_file is None:
            logger.info(f'''No merges file provided. {self.__class__.__name__} can only be used for decoding.''' )
A = None
A = None
else:
with open(UpperCamelCase__ , encoding='utf-8' ) as merges_handle:
A = merges_handle.read().split('\n' )[:-1]
A = [tuple(merge.split()[:2] ) for merge in merges]
A = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A = {}
@property
def UpperCamelCase ( self : Union[str, Any] ):
return len(self.decoder )
def UpperCamelCase ( self : Optional[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] ):
A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
A = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
A = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(UpperCamelCase__ ):
try:
A = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(UpperCamelCase__ )
A = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
A = get_pairs(UpperCamelCase__ )
A = ' '.join(UpperCamelCase__ )
if word == "\n " + BPE_TOKEN_MERGES:
A = '\n' + BPE_TOKEN_MERGES
if word.endswith(UpperCamelCase__ ):
A = word.replace(UpperCamelCase__ , '' )
A = word.replace(' ' , UpperCamelCase__ )
A = word
return word
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : Dict ):
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
A = text.lower()
A = text.split()
A = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(UpperCamelCase__ ).split(' ' ) ) )
return split_tokens
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : str ):
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase ( self : str , UpperCamelCase__ : int ):
A = self.decoder.get(UpperCamelCase__ , self.unk_token )
return result
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : List[str] ):
A = ' '.join(UpperCamelCase__ )
# make sure @@ tokens are concatenated
A = ''.join(string.split(UpperCamelCase__ ) )
return string
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '\n' )
A = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
A = token_index
writer.write(' '.join(UpperCamelCase__ ) + '\n' )
index += 1
return (vocab_file, merges_file)
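# Small sketch (illustration only) mirroring the string-joining step above: the
# "@@ " continuation markers emitted by BPE are collapsed when tokens are joined
# back into text. The example tokens are invented.
def _join_bpe_tokens(tokens, continuation_marker="@@ "):
    string = " ".join(tokens)
    return "".join(string.split(continuation_marker))

# _join_bpe_tokens(["hel@@", "lo", "world"]) -> "hello world"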
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
    SCREAMING_SNAKE_CASE : bool = field(default=__lowercase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    SCREAMING_SNAKE_CASE : bool = field(default=__lowercase , metadata={'''help''': '''Whether to use Adafactor.'''} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=__lowercase , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=__lowercase , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
SCREAMING_SNAKE_CASE : Optional[float] = field(default=__lowercase , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
SCREAMING_SNAKE_CASE : Optional[float] = field(
default=__lowercase , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default='''linear''' , metadata={'''help''': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
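# Hypothetical usage sketch (not part of this file): upstream, the dataclass above
# (Seq2SeqTrainingArguments) is parsed from the command line; the flag names below
# follow the upstream script and are assumptions here.
#
# from transformers import HfArgumentParser
# parser = HfArgumentParser(Seq2SeqTrainingArguments)
# (training_args,) = parser.parse_args_into_dataclasses(
#     ["--output_dir", "out", "--label_smoothing", "0.1", "--lr_scheduler", "linear"]
# )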
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''facebook/bart-large-mnli'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
SCREAMING_SNAKE_CASE : Any = '''text_classifier'''
SCREAMING_SNAKE_CASE : Any = AutoTokenizer
SCREAMING_SNAKE_CASE : Dict = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : List[Any] = ['''text''', ['''text''']]
SCREAMING_SNAKE_CASE : Dict = ['''text''']
def UpperCamelCase ( self : List[str] ):
super().setup()
A = self.model.config
A = -1
        for idx, label in config.id2label.items():
if label.lower().startswith('entail' ):
A = int(UpperCamelCase__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
A = labels
return self.pre_processor(
[text] * len(UpperCamelCase__ ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : int , UpperCamelCase__ : List[str] ):
A = outputs.logits
A = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
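# Illustrative sketch (not part of the tool) of the zero-shot trick used above:
# every candidate label becomes an NLI hypothesis, and the entailment logit
# (column 2 for MNLI-style heads) picks the winner. Left commented out because it
# downloads a checkpoint when run.
#
# import torch
# from transformers import AutoModelForSequenceClassification, AutoTokenizer
# tok = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
# mdl = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")
# labels = ["positive", "negative"]
# inputs = tok(["I loved this movie."] * len(labels),
#              [f"This example is {label}" for label in labels],
#              return_tensors="pt", padding=True)
# with torch.no_grad():
#     logits = mdl(**inputs).logits          # shape (num_labels, 3)
# print(labels[torch.argmax(logits[:, 2]).item()])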
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
def UpperCamelCase ( self : Optional[int] ):
A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase__ , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(UpperCamelCase__ , 'num_attention_heads' ) )
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : str=3 , UpperCamelCase__ : List[Any]=640 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Optional[Any]="silu" , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : str=10 , UpperCamelCase__ : Any=None , ):
A = parent
A = batch_size
A = image_size
A = patch_size
A = num_channels
A = last_hidden_size
A = num_attention_heads
A = hidden_act
A = conv_kernel_size
A = output_stride
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = classifier_dropout_prob
A = use_labels
A = is_training
A = num_labels
A = initializer_range
A = scope
def UpperCamelCase ( self : Optional[int] ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.num_labels )
A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase ( self : Optional[int] ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
A = MobileViTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] ):
A = self.num_labels
A = MobileViTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str ):
A = self.num_labels
A = MobileViTForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
A = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase ( self : Dict ):
A = self.prepare_config_and_inputs()
A , A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Any = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : int = False
def UpperCamelCase ( self : Dict ):
A = MobileViTModelTester(self )
A = MobileViTConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def UpperCamelCase ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase ( self : List[str] ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase ( self : List[Any] ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase ( self : List[Any] ):
pass
def UpperCamelCase ( self : Dict ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(UpperCamelCase__ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase ( self : List[str] ):
pass
def UpperCamelCase ( self : Any ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCamelCase ( self : Any ):
def check_hidden_states_output(UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] ):
A = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A = outputs.hidden_states
A = 5
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
A = 2
for i in range(len(UpperCamelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : int ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def UpperCamelCase ( self : Tuple ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
@slow
def UpperCamelCase ( self : List[Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = MobileViTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __UpperCamelCase () -> List[Any]:
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase ( self : Any ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase ( self : str ):
A = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(UpperCamelCase__ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A = model(**UpperCamelCase__ )
# verify the logits
A = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def UpperCamelCase ( self : List[Any] ):
A = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
A = model.to(UpperCamelCase__ )
A = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
A = prepare_img()
A = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A = model(**UpperCamelCase__ )
A = outputs.logits
# verify the logits
A = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCamelCase__ )
A = torch.tensor(
[
[[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
[[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]],
[[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
] , device=UpperCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def UpperCamelCase ( self : Optional[Any] ):
A = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
A = model.to(UpperCamelCase__ )
A = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
A = prepare_img()
A = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A = model(**UpperCamelCase__ )
A = outputs.logits.detach().cpu()
A = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(50, 60)] )
A = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
A = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
A = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
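# Shape bookkeeping sketch (illustration only) behind the hidden-states test above:
# with the tester defaults (image_size=32, output_stride=32), each of the 5 feature
# maps halves the spatial size, and the final divisor recovers the output stride.
# image_size, divisor = 32, 2
# for _ in range(5):
#     print(image_size // divisor)   # 16, 8, 4, 2, 1
#     divisor *= 2
# assert divisor // 2 == 32          # matches the tester's output_stride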
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase (lowerCAmelCase : List[str] ) -> Dict:
A = r'\w+[.]\d+'
A = re.findall(lowerCAmelCase, lowerCAmelCase )
for pat in pats:
A = key.replace(lowerCAmelCase, '_'.join(pat.split('.' ) ) )
return key
def __UpperCamelCase (lowerCAmelCase : Optional[int], lowerCAmelCase : Dict, lowerCAmelCase : Dict ) -> Any:
A = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
A = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCamelCase (lowerCAmelCase : Tuple, lowerCAmelCase : Any, lowerCAmelCase : str=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
A = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A = flax_model.init_weights(PRNGKey(lowerCAmelCase ) )
A = flatten_dict(lowerCAmelCase )
A = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A = rename_key(lowerCAmelCase )
A = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
A , A = rename_key_and_reshape_tensor(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
A = jnp.asarray(lowerCAmelCase )
return unflatten_dict(lowerCAmelCase )
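# Quick illustration (not part of the converter) of what the first helper above
# (rename_key upstream) does: indexed module names like "layers.0" are rewritten to
# "layers_0" so flattened PyTorch keys line up with Flax parameter trees. The key
# below is hypothetical.
# rename_key("down_blocks.0.attentions.1.weight")
# -> "down_blocks_0.attentions_1.weight"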
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=13 , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : Dict=6 , UpperCamelCase__ : Optional[int]=17 , UpperCamelCase__ : Optional[int]=23 , UpperCamelCase__ : List[Any]=11 , UpperCamelCase__ : Optional[Any]=True , ):
A = parent
A = batch_size
A = seq_length
A = act_dim
A = state_dim
A = hidden_size
A = max_length
A = is_training
def UpperCamelCase ( self : str ):
A = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
A = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
A = floats_tensor((self.batch_size, self.seq_length, 1) )
A = floats_tensor((self.batch_size, self.seq_length, 1) )
A = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
A = random_attention_mask((self.batch_size, self.seq_length) )
A = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def UpperCamelCase ( self : Any ):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , ):
A = DecisionTransformerModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A = model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def UpperCamelCase ( self : str ):
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {
'states': states,
'actions': actions,
'rewards': rewards,
'returns_to_go': returns_to_go,
'timesteps': timesteps,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = (DecisionTransformerModel,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : int = ()
SCREAMING_SNAKE_CASE : Union[str, Any] = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
SCREAMING_SNAKE_CASE : str = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Tuple = False
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : str = False
def UpperCamelCase ( self : Union[str, Any] ):
A = DecisionTransformerModelTester(self )
A = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def UpperCamelCase ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Dict ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@slow
def UpperCamelCase ( self : List[Any] ):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = DecisionTransformerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def UpperCamelCase ( self : Dict ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(UpperCamelCase__ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = [
'states',
'actions',
'rewards',
'returns_to_go',
'timesteps',
'attention_mask',
]
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase ( self : List[str] ):
A = 2 # number of steps of autoregressive prediction we will perform
A = 10 # defined by the RL environment, may be normalized
A = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
A = model.to(UpperCamelCase__ )
A = model.config
torch.manual_seed(0 )
        A = torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase__ , dtype=torch.float32 ) # env.reset()
        A = torch.tensor(
            [[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=UpperCamelCase__ )
        A = torch.tensor(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        A = state
        A = torch.zeros(1 , 0 , config.act_dim , device=UpperCamelCase__ , dtype=torch.float32 )
        A = torch.zeros(1 , 0 , device=UpperCamelCase__ , dtype=torch.float32 )
A = torch.tensor(0 , device=UpperCamelCase__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(UpperCamelCase__ ):
A = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCamelCase__ )] , dim=1 )
A = torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCamelCase__ )] , dim=1 )
A = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
A , A , A = model(
states=UpperCamelCase__ , actions=UpperCamelCase__ , rewards=UpperCamelCase__ , returns_to_go=UpperCamelCase__ , timesteps=UpperCamelCase__ , attention_mask=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
A , A , A , A = ( # env.step(action)
            torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase__ , dtype=torch.float32 ),
1.0,
False,
{},
)
A = action_pred[0, -1]
A = torch.cat([states, state] , dim=1 )
A = returns_to_go[0, -1] - reward
A = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
A = torch.cat(
[timesteps, torch.ones((1, 1) , device=UpperCamelCase__ , dtype=torch.long ) * (step + 1)] , dim=1 )
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase__ : Collection[float] | None = None ):
if components is None:
A = []
A = list(UpperCamelCase__ )
def __len__( self : List[Any] ):
return len(self.__components )
def __str__( self : str ):
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
def __add__( self : str , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self : Dict , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Tuple , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Dict , UpperCamelCase__ : Vector ):
...
def __mul__( self : Union[str, Any] , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , (float, int) ):
A = [c * other for c in self.__components]
return Vector(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(self ) == len(UpperCamelCase__ ):
A = len(self )
A = [self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return sum(UpperCamelCase__ )
else: # error case
raise Exception('invalid operand!' )
def UpperCamelCase ( self : Union[str, Any] ):
return Vector(self.__components )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : float ):
assert -len(self.__components ) <= pos < len(self.__components )
A = value
def UpperCamelCase ( self : str ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
A = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase__ ) )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Vector , UpperCamelCase__ : bool = False ):
A = self * other
A = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def __UpperCamelCase (lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase )
return Vector([0] * dimension )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (isinstance(lowerCAmelCase, lowerCAmelCase ))
A = [0] * dimension
A = 1
return Vector(lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : float, lowerCAmelCase : Vector, lowerCAmelCase : Vector ) -> Vector:
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and isinstance(lowerCAmelCase, lowerCAmelCase )
and (isinstance(lowerCAmelCase, (int, float) ))
)
return x * scalar + y
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
random.seed(lowerCAmelCase )
A = [random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )]
return Vector(lowerCAmelCase )
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : list[list[float]] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
A = matrix
A = w
A = h
def __str__( self : int ):
A = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Optional[Any] , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self : Dict , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : int , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Union[str, Any] , UpperCamelCase__ : Vector ):
...
def __mul__( self : Tuple , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # matrix-vector
if len(UpperCamelCase__ ) == self.__width:
A = zero_vector(self.__height )
for i in range(self.__height ):
A = [
self.__matrix[i][j] * other.component(UpperCamelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar
A = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase__ , self.__width , self.__height )
return None
def UpperCamelCase ( self : Optional[int] ):
return self.__height
def UpperCamelCase ( self : List[Any] ):
return self.__width
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
A = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
A = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase__ ) ):
A = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ )
else:
raise Exception('Indices out of bounds' )
def UpperCamelCase ( self : Tuple ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
A = [
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width )
]
return sum(UpperCamelCase__ )
def __UpperCamelCase (lowerCAmelCase : int ) -> Matrix:
A = [[0] * n for _ in range(lowerCAmelCase )]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Matrix:
random.seed(lowerCAmelCase )
A = [
[random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )
]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
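# Example usage (illustrative). The names below (Vector, Matrix, axpy,
# square_zero_matrix, change_component, determinant) are the upstream names for
# the definitions above and are assumed here.
# x = Vector([1.0, 2.0, 3.0])
# y = Vector([1.0, 1.0, 1.0])
# print(x + y)              # (2.0,3.0,4.0)
# print(x * y)              # 6.0, the dot product
# print(axpy(2.0, x, y))    # (3.0,5.0,7.0), i.e. 2*x + y
# m = square_zero_matrix(2)
# m.change_component(0, 0, 1.0)
# m.change_component(1, 1, 1.0)
# print(m.determinant())    # 1.0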
from __future__ import annotations
from collections.abc import MutableSequence
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : MutableSequence[float] ):
if len(UpperCamelCase__ ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
A = list(UpperCamelCase__ )
A = degree
def __add__( self : Union[str, Any] , UpperCamelCase__ : Polynomial ):
if self.degree > polynomial_a.degree:
A = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , UpperCamelCase__ )
else:
A = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , UpperCamelCase__ )
def __sub__( self : int , UpperCamelCase__ : Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : str ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : List[Any] , UpperCamelCase__ : Polynomial ):
A = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , UpperCamelCase__ )
def UpperCamelCase ( self : Union[str, Any] , UpperCamelCase__ : int | float ):
A = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : List[str] ):
A = ''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(UpperCamelCase__ )
return polynomial
def __repr__( self : List[Any] ):
return self.__str__()
def UpperCamelCase ( self : Dict ):
A = [0] * self.degree
for i in range(self.degree ):
A = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , UpperCamelCase__ )
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : int | float = 0 ):
A = [0] * (self.degree + 2)
A = constant
for i in range(self.degree + 1 ):
A = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , UpperCamelCase__ )
def __eq__( self : Optional[Any] , UpperCamelCase__ : object ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : List[Any] , UpperCamelCase__ : object ):
return not self.__eq__(UpperCamelCase__ )
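# Example usage (illustrative; evaluate, derivative and integral are the upstream
# names for the unnamed methods above).
# p = Polynomial(2, [0, 0, 1])   # represents 1x^2
# q = Polynomial(1, [0, 1])      # represents 1x
# print(p + q)                   # 1x^2 + 1x
# print(p.evaluate(2))           # 4
# print(p.derivative())          # 2x
# print(q.integral())            # 0.5x^2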
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any]=50265 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : int=8 , UpperCamelCase__ : Optional[int]=2048 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=8 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : int=16 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Any=512 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Dict=2 , **UpperCamelCase__ : List[str] , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
            A = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __UpperCamelCase (lowerCAmelCase : str ) -> List[str]:
if not head:
return True
# split the list to two parts
A , A = head.next, head
while fast and fast.next:
A = fast.next.next
A = slow.next
A = slow.next
    A = None # detach the first half from the second (the comparison below still works without this)
# reverse the second part
A = None
while second:
A = second.next
A = node
A = second
A = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
A = node.next
A = head.next
return True
def __UpperCamelCase (lowerCAmelCase : Optional[int] ) -> Dict:
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
A = A = A = head
while fast and fast.next:
A , A = fast.next.next, slow.next
# 2. Push the second half into the stack
A = [slow.val]
while slow.next:
A = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
A = cur.next
return True
def __UpperCamelCase (lowerCAmelCase : Any ) -> Any:
if not head or not head.next:
return True
A = {}
A = 0
while head:
if head.val in d:
d[head.val].append(lowerCAmelCase )
else:
A = [pos]
A = head.next
pos += 1
A = pos - 1
A = 0
for v in d.values():
if len(lowerCAmelCase ) % 2 != 0:
middle += 1
else:
A = 0
for i in range(0, len(lowerCAmelCase ) ):
if v[i] + v[len(lowerCAmelCase ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
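# The checkers above assume a singly linked list; here is a minimal node class and
# builder (conventional names, not from the original snippet) for trying them out:
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def build_list(values):
    head = tail = None
    for v in values:
        node = ListNode(v)
        if head is None:
            head = node
        else:
            tail.next = node
        tail = node
    return head

# e.g. the dictionary-based checker (last function above) returns True for
# build_list([1, 2, 2, 1]) and False for build_list([1, 2, 3]).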
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE : List[str] = '''BridgeTowerImageProcessor'''
SCREAMING_SNAKE_CASE : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
A = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
A = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def UpperCamelCase ( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Any ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
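    # --- Usage sketch (added illustration; assumes network access and the public
    # "BridgeTower/bridgetower-base" checkpoint, which is not referenced in this file):
    #
    #   from transformers import BridgeTowerProcessor
    #   from PIL import Image
    #
    #   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
    #   encoding = processor(Image.open("cat.png"), "a photo of a cat", return_tensors="pt")
    #   # -> input_ids / attention_mask from the tokenizer plus pixel_values / pixel_mask
    #   #    from the image processor, merged into a single BatchEncoding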
| 699 | 1 |
import torch
from transformers import AutoModel
class _UpperCAmelCase ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any="sayef/fsner-bert-base-uncased" ):
super(UpperCamelCase__ , self ).__init__()
A = AutoModel.from_pretrained(UpperCamelCase__ , return_dict=UpperCamelCase__ )
A = torch.nn.CosineSimilarity(3 , 1e-0_8 )
A = torch.nn.Softmax(dim=1 )
def UpperCamelCase ( self : Any , **UpperCamelCase__ : int ):
return self.bert(**UpperCamelCase__ ).last_hidden_state
def UpperCamelCase ( self : Union[str, Any] , UpperCamelCase__ : str ):
return token_embeddings.sum(2 , keepdim=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str]=1 ):
return self.softmax(T * self.cos(UpperCamelCase__ , UpperCamelCase__ ) )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] ):
A = W_supports['sizes'].tolist()
A = W_supports['start_token_id'].item()
A = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
A = self.BERT(**UpperCamelCase__ )
A = self.BERT(**UpperCamelCase__ )
A = None
A = None
A = W_supports['input_ids'] == start_token_id
A = W_supports['input_ids'] == end_token_id
for i, size in enumerate(UpperCamelCase__ ):
if i == 0:
A = 0
else:
A = support_sizes[i - 1]
A = S[s : s + size][start_token_masks[s : s + size]]
A = S[s : s + size][end_token_masks[s : s + size]]
A = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
A = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
A = torch.vstack((p_starts, p_start) )
A = torch.vstack((p_ends, p_end) )
else:
A = p_start
A = p_end
return p_starts, p_ends
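# --- Standalone sketch (added; shapes and the temperature value are assumptions, not
# from this file): the scoring above turns temperature-scaled token-wise cosine
# similarity into a distribution with softmax.
def _cosine_softmax_demo() -> torch.Tensor:
    q = torch.randn(2, 5, 7, 16)  # [batch, query_tokens, support_tokens, hidden]
    s = torch.randn(2, 5, 7, 16)
    cos = torch.nn.CosineSimilarity(3, 1e-08)
    return torch.nn.Softmax(dim=1)(1.0 * cos(q, s))  # T = 1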
| 699 |
def multiplication_table (number : int, number_of_terms : int ) -> str:
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )
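# Quick self-check (added illustration):
assert multiplication_table(5, 3) == "5 * 1 = 5\n5 * 2 = 10\n5 * 3 = 15"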
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 699 | 1 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        centroid = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(centroid)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between the class centroids."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes), )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions )
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes' )
        assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
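# --- Worked example (added illustration, mirrors the dummy data used above): project
# 5 samples with 3 features onto the 2 leading principal components; the result has
# shape (dimensions, n_samples) = (2, 5).
def _pca_demo() -> np.ndarray:
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    return principal_component_analysis(features, 2)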
if __name__ == "__main__":
import doctest
doctest.testmod()
| 699 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _UpperCAmelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , UpperCamelCase__ : int = 128 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : float = 2_000.0 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 2048 , UpperCamelCase__ : float = 0.1 , ):
super().__init__()
A = nn.Sequential(
nn.Linear(UpperCamelCase__ , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , )
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = False
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
# FiLM conditional T5 decoder
A = DecoderLayer(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
self.decoders.append(UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : int ):
A = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
A , A , A = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A = self.conditioning_emb(UpperCamelCase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A = torch.broadcast_to(
torch.arange(UpperCamelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A = self.position_encoding(UpperCamelCase__ )
A = self.continuous_inputs_projection(UpperCamelCase__ )
inputs += position_encodings
A = self.dropout(UpperCamelCase__ )
# decoder: No padding present.
A = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A = [(x, self.encoder_decoder_mask(UpperCamelCase__ , UpperCamelCase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A = lyr(
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )[0]
A = self.decoder_norm(UpperCamelCase__ )
A = self.post_dropout(UpperCamelCase__ )
A = self.spec_out(UpperCamelCase__ )
return spec_out
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=1e-6 ):
super().__init__()
A = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ ) )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=None , ):
A = self.layer[0](
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
if encoder_hidden_states is not None:
A = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
A = self.layer[1](
UpperCamelCase__ , key_value_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
# Apply Film Conditional Feed Forward layer
A = self.layer[-1](UpperCamelCase__ , UpperCamelCase__ )
return (hidden_states,)
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
super().__init__()
A = TaLayerNorm(UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , ):
# pre_self_attention_layer_norm
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.FiLMLayer(UpperCamelCase__ , UpperCamelCase__ )
# Self-attention block
A = self.attention(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
super().__init__()
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , ):
A = self.layer_norm(UpperCamelCase__ )
A = self.attention(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , )
A = hidden_states + self.dropout(UpperCamelCase__ )
return layer_output
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
super().__init__()
A = TaDenseGatedActDense(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=None ):
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.film(UpperCamelCase__ , UpperCamelCase__ )
A = self.DenseReluDense(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
A = NewGELUActivation()
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
        A = self.act(self.wi_a(UpperCamelCase__ ) )  # gate branch: first projection followed by GELU
        A = self.wi_a(UpperCamelCase__ )  # linear branch: the second, separate projection of the gated-GELU pair
A = hidden_gelu * hidden_linear
A = self.dropout(UpperCamelCase__ )
A = self.wo(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=1e-6 ):
super().__init__()
A = nn.Parameter(torch.ones(UpperCamelCase__ ) )
A = eps
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : int ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
A = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCamelCase__ )
A = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
A = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
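# (added note) In formula form, the normalization above is
#   y = w * x / sqrt(mean(x ** 2, dim=-1) + eps)
# i.e. no mean subtraction and no bias term, unlike standard LayerNorm.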
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def UpperCamelCase ( self : Any , UpperCamelCase__ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(UpperCamelCase__ , 3.0 )) ))
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , out_features * 2 , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ):
A = self.scale_bias(UpperCamelCase__ )
A , A = torch.chunk(UpperCamelCase__ , 2 , -1 )
A = x * (1 + scale) + shift
return x
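# --- Minimal standalone FiLM sketch (added; dimensions are assumptions): the layer
# above predicts a per-feature (scale, shift) pair from a conditioning embedding and
# applies x * (1 + scale) + shift.
def _film_demo() -> torch.Tensor:
    x = torch.randn(2, 10, 8)  # [batch, seq, features]
    cond = torch.randn(2, 1, 32)  # conditioning embedding
    scale_bias = nn.Linear(32, 8 * 2, bias=False)(cond)
    scale, shift = torch.chunk(scale_bias, 2, -1)
    return x * (1 + scale) + shift  # broadcast over the sequence dimension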
| 699 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = KandinskyVaaPipeline
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''image_embeds''',
'''negative_image_embeds''',
]
SCREAMING_SNAKE_CASE : str = ['''image_embeds''', '''negative_image_embeds''']
SCREAMING_SNAKE_CASE : Optional[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
SCREAMING_SNAKE_CASE : str = False
@property
def UpperCamelCase ( self : Any ):
return 32
@property
def UpperCamelCase ( self : Tuple ):
return 32
@property
def UpperCamelCase ( self : Optional[int] ):
return self.time_input_dim
@property
def UpperCamelCase ( self : str ):
return self.time_input_dim * 4
@property
def UpperCamelCase ( self : List[Any] ):
return 100
@property
def UpperCamelCase ( self : str ):
torch.manual_seed(0 )
A = {
'in_channels': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
A = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def UpperCamelCase ( self : List[str] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self : List[str] ):
torch.manual_seed(0 )
A = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self : int ):
A = self.dummy_unet
A = self.dummy_movq
A = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , prediction_type='epsilon' , thresholding=UpperCamelCase__ , )
A = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any]=0 ):
A = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith('mps' ):
A = torch.manual_seed(UpperCamelCase__ )
else:
A = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def UpperCamelCase ( self : Dict ):
A = 'cpu'
A = self.get_dummy_components()
A = self.pipeline_class(**UpperCamelCase__ )
A = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A = output.images
A = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A = image[0, -3:, -3:, -1]
A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A = np.array(
[0.6_237_976, 1.0, 0.36_441_332, 1.0, 0.70_639_634, 0.29_877_186, 0.85_652_125, 0.5_216_843, 0.54_454_046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : str ):
A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
A = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
A = KandinskyVaaPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
A = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
A = 'red cat, 4k photo'
A = torch.Generator(device='cuda' ).manual_seed(0 )
A , A = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
A = torch.Generator(device='cuda' ).manual_seed(0 )
A = pipeline(
image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , output_type='np' , )
A = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 699 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
A = WATERMARK_BITS
A = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor ):
# can't encode images that are smaller than 256
if images.shape[-1] < 256:
return images
A = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A = [self.encoder.encode(UpperCamelCase__ , 'dwtDct' ) for image in images]
A = torch.from_numpy(np.array(UpperCamelCase__ ) ).permute(0 , 3 , 1 , 2 )
A = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
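# --- Standalone check (added illustration): the encode path above round-trips images
# between the [-1, 1] float range and the pixel-style [0, 255] range.
def _range_roundtrip_demo() -> bool:
    images = torch.rand(1, 3, 256, 256) * 2 - 1  # [-1, 1]
    as_pixels = 255 * (images / 2 + 0.5)  # [0, 255]
    back = torch.clamp(2 * (as_pixels / 255 - 0.5), min=-1.0, max=1.0)
    return torch.allclose(images, back, atol=1e-4)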
| 699 | 1 |
from __future__ import annotations
import math
def ucal (u : float, p : int ) -> float:
    temp = u
    for i in range(1, p ):
        temp = temp * (u - i)
    return temp
def main () -> None:
    n = int(input('enter the number of values: ' ) )
    y = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('enter the values of parameters in a list: ' )
    x = list(map(int, input().split() ) )
    print('enter the values of corresponding parameters: ' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('enter the value to interpolate: ' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n ):
        summ += (ucal(u, i ) * y[0][i]) / math.factorial(i )
    print(f'''the value at {value} is {summ}''' )
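# Non-interactive self-check (added illustration): ucal multiplies u by successive
# (u - i) terms, e.g. ucal(2.5, 2) = 2.5 * (2.5 - 1) = 3.75.
assert ucal(2.5, 2) == 3.75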
if __name__ == "__main__":
main()
| 699 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : Dict, lowerCAmelCase : Optional[int], lowerCAmelCase : List[Any], lowerCAmelCase : str ) -> int:
for attribute in key.split('.' ):
A = getattr(lowerCAmelCase, lowerCAmelCase )
if weight_type is not None:
A = getattr(lowerCAmelCase, lowerCAmelCase ).shape
else:
A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Optional[int] ) -> Dict:
A = []
A = fairseq_model.state_dict()
A = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, hf_model.config.feat_extract_norm == 'group', )
A = True
else:
for key, mapped_key in MAPPING.items():
A = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
A = True
if "*" in mapped_key:
A = name.split(lowerCAmelCase )[0].split('.' )[-2]
A = mapped_key.replace('*', lowerCAmelCase )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A = 'weight'
else:
A = None
set_recursively(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : List[Any], lowerCAmelCase : int ) -> Dict:
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Union[str, Any]=None, lowerCAmelCase : str=None, lowerCAmelCase : List[Any]=True ) -> Union[str, Any]:
if config_path is not None:
A = UniSpeechSatConfig.from_pretrained(lowerCAmelCase )
else:
A = UniSpeechSatConfig()
A = ''
if is_finetuned:
A = UniSpeechSatForCTC(lowerCAmelCase )
else:
A = UniSpeechSatForPreTraining(lowerCAmelCase )
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
A = model[0].eval()
recursively_load_weights(lowerCAmelCase, lowerCAmelCase )
hf_wavavec.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
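# --- CLI usage sketch (added illustration; the script name and all paths below are
# placeholders, not taken from this file):
#   python <this_script>.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --config_path /path/to/config.json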
| 699 | 1 |
import json
import sys
def format_json_to_md (input_json_file, output_md_file ) -> None:
    with open(input_json_file, encoding='utf-8' ) as f:
        results = json.load(f )
    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('/' )[-1]
        output_md.append(f'''### Benchmark: {benchmark_file_name}''' )
        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old', None )
            dif_val = metric_vals.get('diff', None )
            val_str = f''' {new_val:f}''' if isinstance(new_val, (int, float) ) else 'None'
            if old_val is not None:
                val_str += f''' / {old_val:f}''' if isinstance(old_val, (int, float) ) else 'None'
            if dif_val is not None:
                val_str += f''' ({dif_val:f})''' if isinstance(dif_val, (int, float) ) else 'None'
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('</details>' )
    with open(output_md_file, 'w', encoding='utf-8' ) as f:
        f.writelines('\n'.join(output_md ) )
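# --- Example (added illustration): for input {"bench/x.json": {"time": {"new": 1.5,
# "old": 2.0, "diff": -0.5}}} the emitted markdown contains, inside a <details> block:
#   ### Benchmark: x.json
#   | metric | time |
#   |--------|---|
#   | new / old (diff) |  1.500000 / 2.000000 (-0.500000) |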
if __name__ == "__main__":
_UpperCAmelCase = sys.argv[1]
_UpperCAmelCase = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 699 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase = TypeVar("T")
class Node(Generic[T] ):
    def __init__(self, data: T ):
        self.data = data
        self.next: Node[T] | None = None
    def __str__(self ) -> str:
        return f'''{self.data}'''
class LinkedStack(Generic[T] ):
    def __init__(self ) -> None:
        self.top: Node[T] | None = None
    def __iter__(self ) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__(self ) -> str:
        return "->".join([str(item ) for item in self] )
    def __len__(self ) -> int:
        return len(tuple(iter(self ) ) )
    def is_empty(self ) -> bool:
        return self.top is None
    def push(self, item: T ) -> None:
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop(self ) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top, Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek(self ) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data
    def clear(self ) -> None:
        self.top = None
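# --- Usage sketch (added illustration):
_demo_stack: LinkedStack[int] = LinkedStack()
_demo_stack.push(1)
_demo_stack.push(2)
assert str(_demo_stack) == "2->1" and _demo_stack.pop() == 2 and _demo_stack.peek() == 1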
if __name__ == "__main__":
from doctest import testmod
testmod()
| 699 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCAmelCase = 16
_UpperCAmelCase = 32
def __UpperCamelCase (lowerCAmelCase : Accelerator, lowerCAmelCase : DatasetDict, lowerCAmelCase : List[int], lowerCAmelCase : List[int], lowerCAmelCase : int = 16 ) -> Dict:
A = AutoTokenizer.from_pretrained('bert-base-cased' )
A = DatasetDict(
{
'train': dataset['train'].select(lowerCAmelCase ),
'validation': dataset['train'].select(lowerCAmelCase ),
'test': dataset['validation'],
} )
def tokenize_function(lowerCAmelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowerCAmelCase, max_length=lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A = datasets.map(
lowerCAmelCase, batched=lowerCAmelCase, remove_columns=['idx', 'sentence1', 'sentence2'], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A = tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowerCAmelCase : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A = 16
elif accelerator.mixed_precision != "no":
A = 8
else:
A = None
return tokenizer.pad(
lowerCAmelCase, padding='longest', max_length=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_tensors='pt', )
# Instantiate dataloaders.
A = DataLoader(
tokenized_datasets['train'], shuffle=lowerCAmelCase, collate_fn=lowerCAmelCase, batch_size=lowerCAmelCase )
A = DataLoader(
tokenized_datasets['validation'], shuffle=lowerCAmelCase, collate_fn=lowerCAmelCase, batch_size=lowerCAmelCase )
A = DataLoader(
tokenized_datasets['test'], shuffle=lowerCAmelCase, collate_fn=lowerCAmelCase, batch_size=lowerCAmelCase )
return train_dataloader, eval_dataloader, test_dataloader
def __UpperCamelCase (lowerCAmelCase : Dict, lowerCAmelCase : List[str] ) -> Tuple:
# New Code #
A = []
# Download the dataset
A = load_dataset('glue', 'mrpc' )
# Create our splits
A = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
A = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A = config['lr']
A = int(config['num_epochs'] )
A = int(config['seed'] )
A = int(config['batch_size'] )
A = evaluate.load('glue', 'mrpc' )
# If the batch size is too big we use gradient accumulation
A = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A = batch_size // MAX_GPU_BATCH_SIZE
A = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase )
# New Code #
# Create our folds:
A = kfold.split(np.zeros(datasets['train'].num_rows ), datasets['train']['label'] )
A = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(lowerCAmelCase ):
A , A , A = get_fold_dataloaders(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A = model.to(accelerator.device )
# Instantiate optimizer
A = AdamW(params=model.parameters(), lr=lowerCAmelCase )
# Instantiate scheduler
A = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase, num_warmup_steps=100, num_training_steps=(len(lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A = accelerator.prepare(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
# Now we train the model
for epoch in range(lowerCAmelCase ):
model.train()
for step, batch in enumerate(lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A = model(**lowerCAmelCase )
A = outputs.loss
A = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A = model(**lowerCAmelCase )
A = outputs.logits.argmax(dim=-1 )
A , A = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowerCAmelCase, references=lowerCAmelCase, )
A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''', lowerCAmelCase )
# New Code #
# We also run predictions on the test set at the very end
A = []
for step, batch in enumerate(lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A = model(**lowerCAmelCase )
A = outputs.logits
A , A = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(lowerCAmelCase, dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
A = torch.cat(lowerCAmelCase, dim=0 )
A = torch.stack(lowerCAmelCase, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
A = metric.compute(predictions=lowerCAmelCase, references=lowerCAmelCase )
accelerator.print('Average test metrics from all folds:', lowerCAmelCase )
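# --- Standalone sketch (added illustration; shapes are made up): the fold-averaging
# above stacks per-fold logits, averages them, and takes argmax for the ensembled
# prediction.
def _ensemble_demo():
    fold_logits = [torch.randn(4, 2) for _ in range(3)]  # 3 folds, 4 examples, 2 classes
    return torch.stack(fold_logits, dim=0).sum(dim=0).div(3).argmax(dim=-1)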
def __UpperCamelCase () -> int:
A = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision', type=lowerCAmelCase, default=lowerCAmelCase, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.', )
parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds', type=lowerCAmelCase, default=3, help='The number of splits to perform across the dataset' )
A = parser.parse_args()
A = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(lowerCAmelCase, lowerCAmelCase )
if __name__ == "__main__":
main()
| 699 |
from __future__ import annotations
import math
def minimax (depth : int, node_index : int, is_max : bool, scores : list[int], height : float ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if not scores:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height ), minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height ), minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )
    )
def main () -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores ), 2 )
    print(f'''Optimal value : {minimax(0, 0, True, scores, height )}''' )
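# --- Worked trace (added illustration): with scores [3, 5, 2, 9] and height 2, the
# maximizing root picks max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3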
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 699 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
_UpperCAmelCase = Mapping[str, np.ndarray]
_UpperCAmelCase = Mapping[str, Any] # Is a nested dict.
_UpperCAmelCase = 0.0_1
@dataclasses.dataclass(frozen=__lowercase )
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
SCREAMING_SNAKE_CASE : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
SCREAMING_SNAKE_CASE : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
SCREAMING_SNAKE_CASE : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
SCREAMING_SNAKE_CASE : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
SCREAMING_SNAKE_CASE : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
SCREAMING_SNAKE_CASE : Optional[str] = None
# Templates used to generate this protein (prediction-only)
SCREAMING_SNAKE_CASE : Optional[Sequence[str]] = None
# Chain corresponding to each parent
SCREAMING_SNAKE_CASE : Optional[Sequence[int]] = None
def __UpperCamelCase (lowerCAmelCase : str ) -> Protein:
A = r'(\[[A-Z]+\]\n)'
A = [tag.strip() for tag in re.split(lowerCAmelCase, lowerCAmelCase ) if len(lowerCAmelCase ) > 0]
A = zip(tags[0::2], [l.split('\n' ) for l in tags[1::2]] )
A = ["N", "CA", "C"]
A = None
A = None
A = None
for g in groups:
if "[PRIMARY]" == g[0]:
A = g[1][0].strip()
for i in range(len(lowerCAmelCase ) ):
if seq[i] not in residue_constants.restypes:
A = 'X' # FIXME: strings are immutable
A = np.array(
[residue_constants.restype_order.get(lowerCAmelCase, residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
A = []
for axis in range(3 ):
tertiary.append(list(map(lowerCAmelCase, g[1][axis].split() ) ) )
A = np.array(lowerCAmelCase )
A = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(lowerCAmelCase ):
A = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
A = np.array(list(map({'-': 0, '+': 1}.get, g[1][0].strip() ) ) )
A = np.zeros(
(
len(lowerCAmelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(lowerCAmelCase ):
A = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=lowerCAmelCase, atom_mask=lowerCAmelCase, aatype=lowerCAmelCase, residue_index=np.arange(len(lowerCAmelCase ) ), b_factors=lowerCAmelCase, )
def __UpperCamelCase (lowerCAmelCase : Protein, lowerCAmelCase : int = 0 ) -> List[str]:
A = []
A = prot.remark
if remark is not None:
pdb_headers.append(f'''REMARK {remark}''' )
A = prot.parents
A = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
A = [p for i, p in zip(lowerCAmelCase, lowerCAmelCase ) if i == chain_id]
if parents is None or len(lowerCAmelCase ) == 0:
A = ['N/A']
pdb_headers.append(f'''PARENT {" ".join(lowerCAmelCase )}''' )
return pdb_headers
def __UpperCamelCase (lowerCAmelCase : Protein, lowerCAmelCase : str ) -> str:
A = []
A = pdb_str.split('\n' )
A = prot.remark
if remark is not None:
out_pdb_lines.append(f'''REMARK {remark}''' )
A = 42
if prot.parents is not None and len(prot.parents ) > 0:
A = []
if prot.parents_chain_index is not None:
A = {}
for p, i in zip(prot.parents, prot.parents_chain_index ):
parent_dict.setdefault(str(lowerCAmelCase ), [] )
parent_dict[str(lowerCAmelCase )].append(lowerCAmelCase )
A = max([int(lowerCAmelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
A = parent_dict.get(str(lowerCAmelCase ), ['N/A'] )
parents_per_chain.append(lowerCAmelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
A = [['N/A']]
def make_parent_line(lowerCAmelCase : Sequence[str] ) -> str:
return f'''PARENT {" ".join(lowerCAmelCase )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
A = 0
for i, l in enumerate(lowerCAmelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(lowerCAmelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
            if chain_counter < len(lowerCAmelCase ):
A = parents_per_chain[chain_counter]
else:
A = ['N/A']
out_pdb_lines.append(make_parent_line(lowerCAmelCase ) )
return "\n".join(lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : Protein ) -> str:
A = residue_constants.restypes + ['X']
def res_atoa(lowerCAmelCase : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r], 'UNK' )
A = residue_constants.atom_types
A = []
A = prot.atom_mask
A = prot.aatype
A = prot.atom_positions
A = prot.residue_index.astype(np.intaa )
A = prot.b_factors
A = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
A = get_pdb_headers(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
pdb_lines.extend(lowerCAmelCase )
A = aatype.shape[0]
A = 1
A = 0
A = string.ascii_uppercase
A = None
# Add all atom sites.
for i in range(lowerCAmelCase ):
A = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(lowerCAmelCase, atom_positions[i], atom_mask[i], b_factors[i] ):
if mask < 0.5:
continue
A = 'ATOM'
A = atom_name if len(lowerCAmelCase ) == 4 else f''' {atom_name}'''
A = ''
A = ''
A = 1.00
A = atom_name[0] # Protein supports only C, N, O, S, this works.
A = ''
A = 'A'
if chain_index is not None:
A = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
A = (
f'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
f'''{res_name_a:>3} {chain_tag:>1}'''
f'''{residue_index[i]:>4}{insertion_code:>1} '''
f'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
f'''{occupancy:>6.2f}{b_factor:>6.2f} '''
f'''{element:>2}{charge:>2}'''
)
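            # e.g. (added illustration of the columnar layout; spacing is approximate):
            # "ATOM      1  N   MET A   1      11.000  12.000  13.000  1.00  0.00           N"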
pdb_lines.append(lowerCAmelCase )
atom_index += 1
A = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
A = True
A = chain_index[i + 1]
if should_terminate:
# Close the chain.
A = 'TER'
A = (
f'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(lowerCAmelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(lowerCAmelCase, lowerCAmelCase ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : Protein ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def __UpperCamelCase (lowerCAmelCase : FeatureDict, lowerCAmelCase : ModelOutput, lowerCAmelCase : Optional[np.ndarray] = None, lowerCAmelCase : Optional[np.ndarray] = None, lowerCAmelCase : Optional[str] = None, lowerCAmelCase : Optional[Sequence[str]] = None, lowerCAmelCase : Optional[Sequence[int]] = None, ) -> Protein:
return Protein(
aatype=features['aatype'], atom_positions=result['final_atom_positions'], atom_mask=result['final_atom_mask'], residue_index=features['residue_index'] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ), chain_index=lowerCAmelCase, remark=lowerCAmelCase, parents=lowerCAmelCase, parents_chain_index=lowerCAmelCase, )
| 699 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : PreTrainedTokenizer, lowerCAmelCase : int, lowerCAmelCase : Optional[int] = None, ) -> Dict:
A = {}
if train_file is not None:
A = [train_file]
if eval_file is not None:
A = [eval_file]
if test_file is not None:
A = [test_file]
A = datasets.load_dataset('csv', data_files=lowerCAmelCase )
A = list(ds[list(files.keys() )[0]].features.keys() )
A = features_name.pop(lowerCAmelCase )
A = list(set(ds[list(files.keys() )[0]][label_name] ) )
A = {label: i for i, label in enumerate(lowerCAmelCase )}
A = tokenizer.model_input_names
A = {}
if len(lowerCAmelCase ) == 1:
for k in files.keys():
A = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]], truncation=lowerCAmelCase, max_length=lowerCAmelCase, padding='max_length' ), batched=lowerCAmelCase, )
elif len(lowerCAmelCase ) == 2:
for k in files.keys():
A = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]), truncation=lowerCAmelCase, max_length=lowerCAmelCase, padding='max_length', ), batched=lowerCAmelCase, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
A = (
tf.data.Dataset.from_generator(
lowerCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
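# --- Data format note (added illustration; column names are hypothetical): each CSV
# is expected to carry one label column (picked out by `label_column_id`) plus one or
# two text columns, e.g.
#   sentence,label
#   "great movie",1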
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = field(metadata={'''help''': '''Which column contains the label'''} )
SCREAMING_SNAKE_CASE : str = field(default=__lowercase , metadata={'''help''': '''The path of the training file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the development file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the test file'''} )
SCREAMING_SNAKE_CASE : int = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : bool = field(default=__lowercase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def __UpperCamelCase () -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
A , A , A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
        f'''16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
A , A , A , A = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=lowerCAmelCase, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
A = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(lowerCAmelCase ), label2id=lowerCAmelCase, id2label={id: label for label, id in labelaid.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path ), config=lowerCAmelCase, cache_dir=model_args.cache_dir, )
def compute_metrics(lowerCAmelCase : EvalPrediction ) -> Dict:
A = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A = TFTrainer(
model=lowerCAmelCase, args=lowerCAmelCase, train_dataset=lowerCAmelCase, eval_dataset=lowerCAmelCase, compute_metrics=lowerCAmelCase, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A = trainer.evaluate()
A = os.path.join(training_args.output_dir, 'eval_results.txt' )
with open(lowerCAmelCase, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(lowerCAmelCase )
return results
if __name__ == "__main__":
main()
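# A self-contained sketch of the tf.data pipeline built in get_tfds above: a
# Python generator of (features, label) pairs is wrapped with
# tf.data.Dataset.from_generator using the older output_types/output_shapes
# API. Names here (toy_gen, the input_names contents) are illustrative, not
# taken from the script.
import tensorflow as tf
input_names = ["input_ids", "attention_mask"]
def toy_gen():
    for ids in ([1, 2, 3], [4, 5]):
        yield ({"input_ids": ids, "attention_mask": [1] * len(ids)}, 0)
toy_ds = tf.data.Dataset.from_generator(
    toy_gen,
    ({k: tf.int32 for k in input_names}, tf.int64),
    ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
)
for features, label in toy_ds:
    print({k: v.numpy() for k, v in features.items()}, int(label))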
| 699 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : str=13 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : str=32 , UpperCamelCase__ : str=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Any=37 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[str]=512 , UpperCamelCase__ : List[str]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : Union[str, Any]=4 , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_attention_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_choices
def UpperCamelCase ( self : List[str] ):
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_attention_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase ( self : List[Any] ):
A = self.prepare_config_and_inputs()
A , A , A , A = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : int = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self : Any ):
A = FlaxRoFormerModelTester(self )
@slow
def UpperCamelCase ( self : Dict ):
for model_class_name in self.all_model_classes:
A = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=UpperCamelCase__ )
A = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase__ )
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase ( self : List[str] ):
A = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
A = jnp.array([[0, 1, 2, 3, 4, 5]] )
A = model(UpperCamelCase__ )[0]
A = 50000
A = (1, 6, vocab_size)
self.assertEqual(output.shape , UpperCamelCase__ )
A = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
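# RoFormer's defining component is the rotary position embedding shared by the
# models exercised above. A NumPy sketch of the idea (illustrative only, using
# the interleaved-pair formulation; not the modeling_flax_roformer code):
import numpy as np
def rotary_embed(x, base=10000.0):
    # x: (seq_len, dim) with even dim; rotate each (even, odd) feature pair
    # by a position-dependent angle.
    seq_len, dim = x.shape
    inv_freq = 1.0 / base ** (np.arange(0, dim, 2) / dim)
    angles = np.outer(np.arange(seq_len), inv_freq)  # (seq_len, dim // 2)
    cos, sin = np.cos(angles), np.sin(angles)
    x_even, x_odd = x[:, 0::2], x[:, 1::2]
    out = np.empty_like(x)
    out[:, 0::2] = x_even * cos - x_odd * sin
    out[:, 1::2] = x_even * sin + x_odd * cos
    return out
print(rotary_embed(np.ones((4, 8))).round(3))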
| 699 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
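# The _LazyModule above defers the heavy framework imports until an attribute
# is first touched. A toy equivalent of the same idea using PEP 562
# module-level __getattr__ (a sketch for intuition only, not the actual
# _LazyModule implementation; assumes it lives inside a package):
import importlib
_LAZY = {"XGLMConfig": ".configuration_xglm"}  # attribute -> submodule
def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __package__)
        return getattr(module, name)
    raise AttributeError(name)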
| 699 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = PegasusTokenizer
SCREAMING_SNAKE_CASE : Optional[int] = PegasusTokenizerFast
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Any = True
def UpperCamelCase ( self : Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
A = PegasusTokenizer(UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase ( self : int ):
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def UpperCamelCase ( self : List[Any] , **UpperCamelCase__ : Any ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : List[str] ):
return ("This is a test", "This is a test")
def UpperCamelCase ( self : Optional[Any] ):
A = '</s>'
A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCamelCase ( self : int ):
A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(UpperCamelCase__ ) , 1103 )
def UpperCamelCase ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def UpperCamelCase ( self : List[Any] ):
A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A = self.tokenizer_class.from_pretrained(self.tmpdirname )
A = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
A = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
A = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : List[Any] ):
A = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
A = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
A = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
A = tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : Any ):
A = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
A = 'To ensure a smooth flow of bank resolutions.'
A = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
A = tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCamelCase ( self : str ):
A = ['This is going to be way too long.' * 150, 'short example']
A = ['not super long but more than 5 tokens', 'tiny']
A = self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
A = self._large_tokenizer(
text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask.
@slow
def UpperCamelCase ( self : Tuple ):
# fmt: off
A = {'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = PegasusTokenizer
SCREAMING_SNAKE_CASE : List[Any] = PegasusTokenizerFast
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : List[str] = True
def UpperCamelCase ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
A = PegasusTokenizer(UpperCamelCase__ , offset=0 , mask_token_sent=UpperCamelCase__ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase ( self : int ):
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def UpperCamelCase ( self : List[str] , **UpperCamelCase__ : int ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : List[str] ):
return ("This is a test", "This is a test")
def UpperCamelCase ( self : List[Any] ):
A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
A = self.tokenizer_class.from_pretrained(self.tmpdirname )
A = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
A = rust_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
A = py_tokenizer([raw_input_str] , return_tensors=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids[0]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@require_torch
def UpperCamelCase ( self : Tuple ):
A = ['This is going to be way too long.' * 1000, 'short example']
A = ['not super long but more than 5 tokens', 'tiny']
A = self._large_tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
A = self._large_tokenizer(
text_target=UpperCamelCase__ , max_length=5 , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCamelCase__ ) == 2 # input_ids, attention_mask.
def UpperCamelCase ( self : List[Any] ):
A = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
A = self._large_tokenizer(UpperCamelCase__ ).input_ids
self.assertListEqual(
UpperCamelCase__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
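# The arithmetic behind the asserts above (hedged sketch): ids below the
# offset (103) are reserved for <pad>, </s>, <mask_1>, <mask_2> and the
# <unk_2>..<unk_102> fillers, and every underlying SentencePiece id is then
# shifted up by the offset, so the SentencePiece <unk> (id 2) lands at
# 2 + 103 == 105, matching `unk_token_id == offset + 2`.
def sp_to_hf(sp_id: int, offset: int = 103) -> int:
    return sp_id + offset
assert sp_to_hf(2) == 105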
| 699 |
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> Optional[int]:
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCAmelCase, int(b / 2 ) ) * actual_power(lowerCAmelCase, int(b / 2 ) )
else:
return a * actual_power(lowerCAmelCase, int(b / 2 ) ) * actual_power(lowerCAmelCase, int(b / 2 ) )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> float:
if b < 0:
return 1 / actual_power(lowerCAmelCase, lowerCAmelCase )
return actual_power(lowerCAmelCase, lowerCAmelCase )
if __name__ == "__main__":
print(power(-2, -3))
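# Note that the recursion above calls itself twice on b // 2 without caching
# the result, so it performs O(b) multiplications rather than O(log b). An
# iterative exponentiation-by-squaring sketch with the intended complexity
# (fast_pow is an illustrative name, not from this file):
def fast_pow(a: float, b: int) -> float:
    result, base, exp = 1.0, a, abs(b)
    while exp:
        if exp & 1:
            result *= base
        base *= base
        exp >>= 1
    return 1 / result if b < 0 else result
assert fast_pow(2, 10) == 1024
assert fast_pow(-2, -3) == -0.125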
| 699 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
class lowerCamelCase_ ( lowerCamelCase ):
a__ = ['''pixel_values''']
def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = PILImageResampling.BILINEAR , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = 1 / 2_5_5 , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
__magic_name__ :Optional[Any] = size if size is not None else {'''shortest_edge''': 2_5_6}
__magic_name__ :List[Any] = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
__magic_name__ :List[str] = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__magic_name__ :int = get_size_dict(__lowerCAmelCase )
__magic_name__ :Optional[int] = do_resize
__magic_name__ :List[str] = size
__magic_name__ :List[str] = resample
__magic_name__ :str = do_center_crop
__magic_name__ :Union[str, Any] = crop_size
__magic_name__ :Tuple = do_rescale
__magic_name__ :Optional[Any] = rescale_factor
__magic_name__ :Dict = do_normalize
__magic_name__ :List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__magic_name__ :str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :int = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__magic_name__ :Dict = get_resize_output_image_size(__lowerCAmelCase , size=size['''shortest_edge'''] , default_to_square=__lowerCAmelCase )
return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :Any = get_size_dict(__lowerCAmelCase )
return center_crop(__lowerCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase ):
"""simple docstring"""
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ):
"""simple docstring"""
__magic_name__ :Optional[int] = do_resize if do_resize is not None else self.do_resize
__magic_name__ :Union[str, Any] = size if size is not None else self.size
__magic_name__ :Dict = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
__magic_name__ :Optional[Any] = resample if resample is not None else self.resample
__magic_name__ :Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
__magic_name__ :List[str] = crop_size if crop_size is not None else self.crop_size
__magic_name__ :str = get_size_dict(__lowerCAmelCase )
__magic_name__ :Tuple = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ :Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ :Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ :Union[str, Any] = image_mean if image_mean is not None else self.image_mean
__magic_name__ :Union[str, Any] = image_std if image_std is not None else self.image_std
__magic_name__ :List[Any] = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__magic_name__ :Union[str, Any] = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
__magic_name__ :Tuple = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
if do_center_crop:
__magic_name__ :str = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images]
if do_rescale:
__magic_name__ :List[str] = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
if do_normalize:
__magic_name__ :Optional[int] = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images]
__magic_name__ :Dict = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
__magic_name__ :Any = {'''pixel_values''': images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
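# The numeric core of the preprocess chain above, as a hedged stand-alone
# sketch (IMAGENET_STANDARD_MEAN / IMAGENET_STANDARD_STD imported above are
# 0.5 per channel; the resize and center-crop steps are omitted here):
import numpy as np
MEAN = np.array([0.5, 0.5, 0.5])
STD = np.array([0.5, 0.5, 0.5])
image = np.random.default_rng(0).integers(0, 256, size=(224, 224, 3)).astype(np.float32)
pixels = image * (1 / 255)                # the rescale step
pixels = (pixels - MEAN) / STD            # the normalize step
pixels = np.transpose(pixels, (2, 0, 1))  # ChannelDimension.FIRST
print(pixels.shape, float(pixels.min()) >= -1.0, float(pixels.max()) <= 1.0)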
| 0 |
def __UpperCamelCase (lowerCAmelCase : list[int] ) -> int:
if not numbers:
return 0
if not isinstance(lowerCAmelCase, (list, tuple) ) or not all(
isinstance(lowerCAmelCase, lowerCAmelCase ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
A = A = A = numbers[0]
for i in range(1, len(lowerCAmelCase ) ):
# update the maximum and minimum subarray products
A = numbers[i]
if number < 0:
A , A = min_till_now, max_till_now
A = max(lowerCAmelCase, max_till_now * number )
A = min(lowerCAmelCase, min_till_now * number )
# update the maximum product found till now
A = max(lowerCAmelCase, lowerCAmelCase )
return max_prod
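# A brute-force cross-check for the linear-time routine above (quadratic
# reference implementation; names are illustrative only):
def brute_max_product(nums):
    best = nums[0]
    for i in range(len(nums)):
        prod = 1
        for x in nums[i:]:
            prod *= x
            best = max(best, prod)
    return best
for case, want in [([2, 3, -2, 4], 6), ([-2, 0, -1], 0), ([-2, -3, 4], 24)]:
    assert brute_max_product(case) == want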
| 699 | 0 |
from __future__ import annotations
class __lowerCamelCase :
def __init__( self: Optional[int],A_: str,A_: str ):
'''simple docstring'''
__UpperCamelCase, __UpperCamelCase = text, pattern
__UpperCamelCase, __UpperCamelCase = len(A_ ), len(A_ )
def snake_case_ ( self: Tuple,A_: str ):
'''simple docstring'''
for i in range(self.patLen - 1,-1,-1 ):
if char == self.pattern[i]:
return i
return -1
def snake_case_ ( self: List[str],A_: int ):
'''simple docstring'''
for i in range(self.patLen - 1,-1,-1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def snake_case_ ( self: Tuple ):
'''simple docstring'''
__UpperCamelCase = []
for i in range(self.textLen - self.patLen + 1 ):
__UpperCamelCase = self.mismatch_in_text(A_ )
if mismatch_index == -1:
positions.append(A_ )
else:
__UpperCamelCase = self.match_in_pattern(self.text[mismatch_index] )
__UpperCamelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__snake_case = '''ABAABA'''
__snake_case = '''AB'''
__snake_case = BoyerMooreSearch(text, pattern)
__snake_case = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
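# The bad-character rule above shifts the pattern so a mismatching text
# character lines up with its rightmost occurrence in the pattern. The same
# lookup as a precomputed table (illustrative sketch of match_in_pattern):
def last_occurrence(pattern: str) -> dict:
    return {ch: i for i, ch in enumerate(pattern)}  # rightmost index wins
table = last_occurrence("AB")
print(table.get("A", -1), table.get("B", -1), table.get("C", -1))  # 0 1 -1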
| 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 699 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
_A = 9
_A = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_A = kruskal(_snake_case , _snake_case )
_A = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(_snake_case ) == sorted(_snake_case )
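# kruskal() here comes from the repository under test; a minimal stand-alone
# sketch of the algorithm with a path-compressing union-find, assuming the
# same [u, v, weight] edge format used above:
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))
    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path compression
            x = parent[x]
        return x
    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        ru, rv = find(u), find(v)
        if ru != rv:  # joining two components never creates a cycle
            parent[ru] = rv
            mst.append([u, v, w])
    return mst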
| 2 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''biogpt'''
def __init__( self : Optional[Any] , UpperCamelCase__ : str=42384 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1024 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Dict=1e-1_2 , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Optional[Any]=2 , **UpperCamelCase__ : List[Any] , ):
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = scale_embedding
A = use_cache
A = layerdrop
A = activation_dropout
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
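# Hedged usage sketch: the config behaves like any PretrainedConfig subclass.
# This assumes a transformers version that ships BioGPT and uses the real
# public name BioGptConfig rather than the masked class name above:
from transformers import BioGptConfig
config = BioGptConfig(num_hidden_layers=2)
print(config.hidden_size, config.num_hidden_layers)  # 1024 2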
| 699 | 0 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
lowerCAmelCase : List[Any] = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
lowerCAmelCase : List[Any] = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
lowerCAmelCase : Tuple = BeautifulSoup(res.text, 'html.parser')
lowerCAmelCase : List[Any] = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 3 |
import sys
def __UpperCamelCase (lowerCAmelCase : Dict ) -> Dict:
A = len(lowerCAmelCase )
A = [[0 for x in range(lowerCAmelCase )] for x in range(lowerCAmelCase )]
A = [[0 for x in range(lowerCAmelCase )] for x in range(lowerCAmelCase )]
for chain_length in range(2, lowerCAmelCase ):
for a in range(1, n - chain_length + 1 ):
A = a + chain_length - 1
A = sys.maxsize
for c in range(lowerCAmelCase, lowerCAmelCase ):
A = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
A = cost
A = c
return matrix, sol
def __UpperCamelCase (lowerCAmelCase : Optional[Any], lowerCAmelCase : Union[str, Any], lowerCAmelCase : Union[str, Any] ) -> List[str]:
if i == j:
print('A' + str(lowerCAmelCase ), end=' ' )
else:
print('(', end=' ' )
        print_optimal_solution(lowerCAmelCase, lowerCAmelCase, optimal_solution[i][j] )
        print_optimal_solution(lowerCAmelCase, optimal_solution[i][j] + 1, lowerCAmelCase )
print(')', end=' ' )
def __UpperCamelCase () -> List[str]:
A = [30, 35, 15, 5, 10, 20, 25]
A = len(lowerCAmelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
A , A = matrix_chain_order(lowerCAmelCase )
print('No. of Operation required: ' + str(matrix[1][n - 1] ) )
    print_optimal_solution(lowerCAmelCase, 1, n - 1 )
if __name__ == "__main__":
main()
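# For the dimensions [30, 35, 15, 5, 10, 20, 25] (the classic CLRS chain),
# the run above reports 15125 scalar multiplications. A memoized top-down
# cross-check of that count (illustrative sketch):
from functools import lru_cache
def mcm_cost(p):
    @lru_cache(maxsize=None)
    def best(i, j):  # cheapest way to multiply the chain A_i .. A_j
        if i == j:
            return 0
        return min(
            best(i, c) + best(c + 1, j) + p[i - 1] * p[c] * p[j]
            for c in range(i, j)
        )
    return best(1, len(p) - 1)
assert mcm_cost((30, 35, 15, 5, 10, 20, 25)) == 15125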
| 699 | 0 |
"""simple docstring"""
class a :
def __init__( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase = size
lowerCAmelCase = [0] * size
lowerCAmelCase = [0] * size
@staticmethod
def UpperCamelCase__ ( _snake_case ):
"""simple docstring"""
return index | (index + 1)
@staticmethod
def UpperCamelCase__ ( _snake_case ):
"""simple docstring"""
return (index & (index + 1)) - 1
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = value
while index < self.size:
lowerCAmelCase = self.get_prev(_snake_case ) + 1
if current_left_border == index:
lowerCAmelCase = value
else:
lowerCAmelCase = max(_snake_case , _snake_case , _snake_case )
lowerCAmelCase = self.get_next(_snake_case )
def UpperCamelCase__ ( self , _snake_case , _snake_case ):
"""simple docstring"""
right -= 1 # Because of right is exclusive
lowerCAmelCase = 0
while left <= right:
lowerCAmelCase = self.get_prev(_snake_case )
if left <= current_left:
lowerCAmelCase = max(_snake_case , self.tree[right] )
lowerCAmelCase = current_left
else:
lowerCAmelCase = max(_snake_case , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
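# The class above is a Fenwick-style tree for range-max queries, but its
# method names are all masked into one identifier, so it cannot be exercised
# directly. A compact self-contained cousin for prefix maxima (sketch only;
# valid when each position is written once or values only grow):
class PrefixMax:
    def __init__(self, size):
        self.tree = [float("-inf")] * (size + 1)
    def update(self, i, value):  # 1-indexed point update
        while i < len(self.tree):
            self.tree[i] = max(self.tree[i], value)
            i += i & -i
    def query(self, i):  # max of positions 1..i
        best = float("-inf")
        while i > 0:
            best = max(best, self.tree[i])
            i -= i & -i
        return best
pm = PrefixMax(8)
for pos, v in enumerate([2, 7, 1, 8, 2, 8, 1, 8], start=1):
    pm.update(pos, v)
print(pm.query(4), pm.query(8))  # 8 8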
| 4 |
from math import isqrt
def __UpperCamelCase (lowerCAmelCase : int ) -> bool:
return all(number % divisor != 0 for divisor in range(2, isqrt(lowerCAmelCase ) + 1 ) )
def __UpperCamelCase (lowerCAmelCase : int = 10**6 ) -> int:
A = 0
A = 1
A = 7
while prime_candidate < max_prime:
primes_count += is_prime(lowerCAmelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
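# The candidates generated above are consecutive-cube differences:
# (k + 1)**3 - k**3 == 3*k*k + 3*k + 1, and stepping k -> k + 1 adds
# 6*(k + 1), which is exactly the `6 * cube_index` increment. First few:
for k in range(1, 6):
    print(k, (k + 1) ** 3 - k ** 3)  # 7, 19, 37, 61, 91 (91 = 7 * 13)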
| 699 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
@abstractmethod
def _lowercase ( _lowercase ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def _lowercase ( self ):
"""simple docstring"""
raise NotImplementedError()
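# A sketch of how such command classes are typically wired up: each subclass
# registers its own subparser and exposes run(). Stand-alone illustrative
# version, not the transformers implementation:
from argparse import ArgumentParser
class HelloCommand:
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        sub = parser.add_subparsers().add_parser("hello")
        sub.add_argument("--name", default="world")
        sub.set_defaults(func=lambda args: HelloCommand(args.name))
    def __init__(self, name):
        self.name = name
    def run(self):
        print(f"hello, {self.name}")
parser = ArgumentParser("demo")
HelloCommand.register_subcommand(parser)
args = parser.parse_args(["hello", "--name", "docs"])
args.func(args).run()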
| 5 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
def __init__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Tuple ):
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 699 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_lowerCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCamelCase = 256
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["melgan"]
def __init__( self :List[Any] , __A :SpectrogramNotesEncoder , __A :SpectrogramContEncoder , __A :TaFilmDecoder , __A :DDPMScheduler , __A :OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
SCREAMING_SNAKE_CASE__ = math.log(1E-5 ) # Matches MelGAN training.
SCREAMING_SNAKE_CASE__ = 4.0 # Largest value for most examples
SCREAMING_SNAKE_CASE__ = 128
self.register_modules(
notes_encoder=__A , continuous_encoder=__A , decoder=__A , scheduler=__A , melgan=__A , )
def _snake_case ( self :str , __A :List[Any] , __A :Optional[int]=(-1.0, 1.0) , __A :Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output_range
if clip:
SCREAMING_SNAKE_CASE__ = torch.clip(__A , self.min_value , self.max_value )
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _snake_case ( self :Dict , __A :Tuple , __A :str=(-1.0, 1.0) , __A :List[str]=False ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input_range
SCREAMING_SNAKE_CASE__ = torch.clip(__A , __A , __A ) if clip else outputs
# Scale to [0, 1].
SCREAMING_SNAKE_CASE__ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _snake_case ( self :Union[str, Any] , __A :Any , __A :List[Any] , __A :str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = input_tokens > 0
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.notes_encoder(
encoder_input_tokens=__A , encoder_inputs_mask=__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.continuous_encoder(
encoder_inputs=__A , encoder_inputs_mask=__A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _snake_case ( self :Any , __A :int , __A :str , __A :Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = noise_time
if not torch.is_tensor(__A ):
SCREAMING_SNAKE_CASE__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(__A ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
SCREAMING_SNAKE_CASE__ = self.decoder(
encodings_and_masks=__A , decoder_input_tokens=__A , decoder_noise_time=__A )
return logits
@torch.no_grad()
def __call__( self :Dict , __A :List[List[int]] , __A :Optional[torch.Generator] = None , __A :int = 100 , __A :bool = True , __A :str = "numpy" , __A :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A :int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__A )}.''' )
        SCREAMING_SNAKE_CASE__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        SCREAMING_SNAKE_CASE__ = np.zeros([1, 0, self.n_dims] , np.float32 )
SCREAMING_SNAKE_CASE__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
for i, encoder_input_tokens in enumerate(__A ):
if i == 0:
SCREAMING_SNAKE_CASE__ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
SCREAMING_SNAKE_CASE__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
SCREAMING_SNAKE_CASE__ = ones
SCREAMING_SNAKE_CASE__ = self.scale_features(
__A , output_range=[-1.0, 1.0] , clip=__A )
SCREAMING_SNAKE_CASE__ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__A , continuous_mask=__A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
SCREAMING_SNAKE_CASE__ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
SCREAMING_SNAKE_CASE__ = self.decode(
encodings_and_masks=__A , input_tokens=__A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(__A , __A , __A , generator=__A ).prev_sample
SCREAMING_SNAKE_CASE__ = self.scale_to_features(__A , input_range=[-1.0, 1.0] )
SCREAMING_SNAKE_CASE__ = mel[:1]
SCREAMING_SNAKE_CASE__ = mel.cpu().float().numpy()
SCREAMING_SNAKE_CASE__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A )
logger.info("""Generated segment""" , __A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
            SCREAMING_SNAKE_CASE__ = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
else:
SCREAMING_SNAKE_CASE__ = full_pred_mel
if not return_dict:
return (output,)
        return AudioPipelineOutput(audios=__A )
 | 6 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "layer_norm" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = only_cross_attention
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A = AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A = AdaLayerNormZero(UpperCamelCase__ , UpperCamelCase__ )
else:
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = Attention(
query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A = (
AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
)
A = Attention(
query_dim=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , upcast_attention=UpperCamelCase__ , ) # is self-attn if encoder_hidden_states is none
else:
A = None
A = None
# 3. Feed-forward
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = FeedForward(UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn=UpperCamelCase__ , final_dropout=UpperCamelCase__ )
# let chunk size default to None
A = None
A = 0
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
# Sets chunk feed-forward
A = chunk_size
A = dim
def UpperCamelCase ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Dict[str, Any] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A = self.norma(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A = self.norma(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=hidden_states.dtype )
else:
A = self.norma(UpperCamelCase__ )
A = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
if self.use_ada_layer_norm_zero:
A = gate_msa.unsqueeze(1 ) * attn_output
A = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A = (
self.norma(UpperCamelCase__ , UpperCamelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase__ )
)
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
A = attn_output + hidden_states
# 3. Feed-forward
A = self.norma(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A = torch.cat(
[self.ff(UpperCamelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A = self.ff(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = gate_mlp.unsqueeze(1 ) * ff_output
A = ff_output + hidden_states
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = int(dim * mult )
A = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A = GELU(UpperCamelCase__ , UpperCamelCase__ )
if activation_fn == "gelu-approximate":
A = GELU(UpperCamelCase__ , UpperCamelCase__ , approximate='tanh' )
elif activation_fn == "geglu":
A = GEGLU(UpperCamelCase__ , UpperCamelCase__ )
elif activation_fn == "geglu-approximate":
A = ApproximateGELU(UpperCamelCase__ , UpperCamelCase__ )
A = nn.ModuleList([] )
# project in
self.net.append(UpperCamelCase__ )
# project dropout
self.net.append(nn.Dropout(UpperCamelCase__ ) )
# project out
self.net.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCamelCase__ ) )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int ):
for module in self.net:
A = module(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str = "none" ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
A = approximate
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int ):
A = self.proj(UpperCamelCase__ )
A = self.gelu(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , dim_out * 2 )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Tuple ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def UpperCamelCase ( self : str , UpperCamelCase__ : str ):
A , A = self.proj(UpperCamelCase__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCamelCase__ )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
A = self.proj(UpperCamelCase__ )
return x * torch.sigmoid(1.702 * x )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
super().__init__()
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , embedding_dim * 2 )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ ) ) )
A , A = torch.chunk(UpperCamelCase__ , 2 )
A = self.norm(UpperCamelCase__ ) * (1 + scale) + shift
return x
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
super().__init__()
A = CombinedTimestepLabelEmbeddings(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , 6 * embedding_dim , bias=UpperCamelCase__ )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ , eps=1e-6 )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=UpperCamelCase__ ) ) )
A , A , A , A , A , A = emb.chunk(6 , dim=1 )
A = self.norm(UpperCamelCase__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : float = 1e-5 ):
super().__init__()
A = num_groups
A = eps
if act_fn is None:
A = None
else:
A = get_activation(UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , out_dim * 2 )
def UpperCamelCase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str ):
if self.act:
A = self.act(UpperCamelCase__ )
A = self.linear(UpperCamelCase__ )
A = emb[:, :, None, None]
A , A = emb.chunk(2 , dim=1 )
A = F.group_norm(UpperCamelCase__ , self.num_groups , eps=self.eps )
A = x * (1 + scale) + shift
return x
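# The chunked feed-forward path above trades peak memory for extra kernel
# launches; any per-position module yields identical results chunked or not.
# Equivalence sketch:
import torch
ff = torch.nn.Linear(16, 16)
x = torch.randn(2, 8, 16)
full = ff(x)
chunked = torch.cat([ff(c) for c in x.chunk(4, dim=1)], dim=1)
assert torch.allclose(full, chunked, atol=1e-6)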
| 699 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 7 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
_UpperCAmelCase = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
_UpperCAmelCase = "</w>"
_UpperCAmelCase = "@@ "
def __UpperCamelCase (lowerCAmelCase : Optional[int] ) -> List[str]:
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
return pairs
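# The loop above pairs each symbol with its successor; an equivalent
# one-liner as an illustrative check:
word = ("l", "o", "w", "</w>")
print(sorted(set(zip(word, word[1:]))))  # [('l', 'o'), ('o', 'w'), ('w', '</w>')]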
# Speech2Text2 has no max input length
_UpperCAmelCase = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class _UpperCAmelCase ( PreTrainedTokenizer ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding='utf-8' ) as merges_handle:
                merges = merges_handle.read().split('\n' )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}
    @property
    def vocab_size( self ) -> int:
        return len(self.decoder )

    def get_vocab( self ) -> Dict:
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '\n' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , '' )
        word = word.replace(' ' , BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.' )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(' ' ) ) )
        return split_tokens
    def _convert_token_to_id( self , token : str ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index : int ):
        result = self.decoder.get(index , self.unk_token )
        return result

    def convert_tokens_to_string( self , tokens : List[str] ):
        string = ' '.join(tokens )
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB ) )
        return string
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , 'w' , encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return (vocab_file, merges_file)
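

# A round-trip sketch for the tokenizer above (file paths are placeholders; a
# checkpoint such as facebook/s2t-wav2vec2-large-en-de ships both files):
#
#     tok = _UpperCAmelCase(vocab_file='vocab.json', merges_file='merges.txt')
#     tokens = tok._tokenize('hello world')
#     ids = [tok._convert_token_to_id(t) for t in tokens]
#     text = tok.convert_tokens_to_string([tok._convert_id_to_token(i) for i in ids])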
| 699 | 0 |
'''simple docstring'''
def logical_left_shift( number : int , shift_amount : int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift( number : int , shift_amount : int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift( number : int , shift_amount : int ) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number ) ).strip('-' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
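
# Quick sanity checks for the three shifts above (values verified by hand):
#
#     logical_left_shift(1, 2)       -> '0b100'
#     logical_right_shift(8, 2)      -> '0b10'
#     arithmetic_right_shift(-8, 2)  -> '0b11110'  (-8 >> 2 == -2 in 5-bit two's complement)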
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _UpperCAmelCase ( PipelineTool ):
    '''simple docstring'''

    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']
    def setup( self ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail' ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )

    def encode( self , text , labels ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )

    def decode( self , outputs ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
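

# A usage sketch for the zero-shot classifier tool above (downloads the
# bart-large-mnli checkpoint on first use; text/labels are example values):
#
#     tool = _UpperCAmelCase()
#     tool.setup()
#     inputs = tool.encode(text='I really enjoyed this film', labels=['positive', 'negative'])
#     print(tool.decode(tool.model(**inputs)))   # most likely label, e.g. 'positive'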
| 699 | 0 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCAmelCase ( PerceiverImageProcessor ):
"""simple docstring"""
def __init__( self : Dict , *_snake_case : Optional[int] , **_snake_case : Optional[Any] ):
"""simple docstring"""
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PerceiverImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 9 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def rename_key(key ):
    regex = r'\w+[.]\d+'
    pats = re.findall(regex, key )
    for pat in pats:
        key = key.replace(pat, '_'.join(pat.split('.' ) ) )
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict ):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42 ):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('.' ) )

        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
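

# A standalone check of the conv-kernel convention handled above: PyTorch
# stores conv weights as (out, in, kH, kW) while Flax expects (kH, kW, in, out),
# hence the transpose(2, 3, 1, 0). Illustrative shapes only:
#
#     import numpy as np
#     w_pt = np.zeros((8, 3, 5, 5))         # PyTorch OIHW
#     w_flax = w_pt.transpose(2, 3, 1, 0)   # -> (5, 5, 3, 8), Flax HWIO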
| 699 | 0 |
from __future__ import annotations
import math
_lowerCAmelCase = "2020.9.26"
_lowerCAmelCase = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d( x , y , z , scale , distance ):
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f"""Input values must either be float or int: {list(locals().values() )}"""
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate( x , y , z , axis , angle ):
    if not isinstance(axis , str ):
        raise TypeError('''Axis must be a str''' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            '''Input values except axis must either be float or int: '''
            f"""{list(input_variables.values() )}"""
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f'{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }')
print(f'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
| 10 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector :
    '''simple docstring'''

    def __init__( self , components : Collection[float] | None = None ):
        if components is None:
            components = []
        self.__components = list(components )
def __len__( self : List[Any] ):
return len(self.__components )
def __str__( self : str ):
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
    def __add__( self , other : Vector ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception('must have the same size' )

    def __sub__( self , other : Vector ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else:  # error case
            raise Exception('must have the same size' )

    @overload
    def __mul__( self , other : float ):
        ...

    @overload
    def __mul__( self , other : Vector ):
        ...

    def __mul__( self , other : float | Vector ):
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else:  # error case
            raise Exception('invalid operand!' )

    def copy( self ):
        return Vector(self.__components )

    def component( self , i : int ):
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('index out of range' )

    def change_component( self , pos : int , value : float ):
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value

    def euclidean_length( self ):
        if len(self.__components ) == 0:
            raise Exception('Vector is empty' )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )

    def angle( self , other : Vector , deg : bool = False ):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector(dimension : int ) -> Vector:
    assert isinstance(dimension, int )
    return Vector([0] * dimension )


def unit_basis_vector(dimension : int, pos : int ) -> Vector:
    assert isinstance(dimension, int ) and (isinstance(pos, int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )


def axpy(scalar : float, x : Vector, y : Vector ) -> Vector:
    assert (
        isinstance(x, Vector )
        and isinstance(y, Vector )
        and (isinstance(scalar, (int, float) ))
    )
    return x * scalar + y


def random_vector(n : int, a : int, b : int ) -> Vector:
    random.seed(None )
    ans = [random.randint(a, b ) for _ in range(n )]
    return Vector(ans )
class Matrix :
    '''simple docstring'''

    def __init__( self , matrix : list[list[float]] , w : int , h : int ):
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self : int ):
A = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__( self , other : Matrix ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrix must have the same dimension!' )

    def __sub__( self , other : Matrix ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrices must have the same dimension!' )

    @overload
    def __mul__( self , other : float ):
        ...

    @overload
    def __mul__( self , other : Vector ):
        ...

    def __mul__( self , other : float | Vector ):
        if isinstance(other , Vector ):  # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    prods = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(prods ) )
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!' )
        elif isinstance(other , (int, float) ):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None

    def height( self ):
        return self.__height

    def width( self ):
        return self.__width

    def component( self , x : int , y : int ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds' )

    def change_component( self , x : int , y : int , value : float ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds' )

    def minor( self , x : int , y : int ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()

    def cofactor( self , x : int , y : int ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception('Indices out of bounds' )

    def determinant( self ):
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if self.__height < 1:
            raise Exception('Matrix has no element' )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )


def square_zero_matrix(n : int ) -> Matrix:
    ans = [[0] * n for _ in range(n )]
    return Matrix(ans, n, n )


def random_matrix(width : int, height : int, a : int, b : int ) -> Matrix:
    random.seed(None )
    matrix = [
        [random.randint(a, b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix, width, height )
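

# A small usage sketch for the linear-algebra classes above:
#
#     v = Vector([1, 2, 3])
#     w = Vector([4, 5, 6])
#     print(v + w)                # (5,7,9)
#     print(v * w)                # 32 (dot product)
#     m = square_zero_matrix(2)
#     print(m.determinant())      # 0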
| 699 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester :
    '''simple docstring'''

    def __init__(self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , type_sequence_label_size=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ) -> List[str]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self ) -> List[Any]:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self ) -> Optional[int]:
        """simple docstring"""
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )

    def get_config(self ) -> Optional[Any]:
        """simple docstring"""
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , num_labels=self.num_labels , )

    def create_and_check_for_semantic_segmentation(self , config , pixel_values , labels ) -> Union[str, Any]:
        """simple docstring"""
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def prepare_config_and_inputs_for_common(self ) -> int:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self ) -> List[str]:
        """simple docstring"""
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=37 )
def a__ (self ) -> List[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ (self ) -> List[str]:
"""simple docstring"""
return
def a__ (self ) -> Dict:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(A )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def a__ (self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def a__ (self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def a__ (self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def a__ (self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a__ (self ) -> Any:
"""simple docstring"""
pass
def a__ (self ) -> str:
"""simple docstring"""
def check_hidden_states_output(A , A , A ):
_a = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(A , A ) )
_a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(A , A , A )
def a__ (self ) -> str:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = _config_zero_init(A )
_a = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_a = model_class(config=A )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def a__ (self ) -> Tuple:
"""simple docstring"""
pass
@slow
def a__ (self ) -> str:
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = UperNetForSemanticSegmentation.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''' , repo_type='''dataset''' , filename='''ADE_val_00000001.jpg''')
    image = Image.open(filepath ).convert('''RGB''')
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_swin_backbone(self ) -> List[str]:
        """simple docstring"""
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 ) )

    def test_inference_convnext_backbone(self ) -> List[str]:
        """simple docstring"""
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors='''pt''' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 ) )
| 11 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig ( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''blenderbot-small'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=50265 , max_position_embeddings=512 , encoder_layers=8 , encoder_ffn_dim=2048 , encoder_attention_heads=16 , decoder_layers=8 , decoder_ffn_dim=2048 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=512 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=1 , scale_embedding=False , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class BlenderbotSmallOnnxConfig ( OnnxSeq2SeqConfigWithPast ):
'''simple docstring'''
    @property
    def inputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f'''past_key_values.{i}.key'''] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'''past_key_values.{i}.value'''] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs

    @property
    def outputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f'''present.{i}.key'''] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'''present.{i}.value'''] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
    def _flatten_past_key_values_( self , flattened_output , name , idx , t ):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
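

# For reference, the `inputs` property above yields the following dynamic-axes
# mapping for the default seq2seq task without past key values:
#
#     {
#         'input_ids':              {0: 'batch', 1: 'encoder_sequence'},
#         'attention_mask':         {0: 'batch', 1: 'encoder_sequence'},
#         'decoder_input_ids':      {0: 'batch', 1: 'decoder_sequence'},
#         'decoder_attention_mask': {0: 'batch', 1: 'decoder_sequence'},
#     }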
| 699 | 0 |
def different_signs( num1 , num2 ) -> bool:
    '''simple docstring'''
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
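
# The XOR trick above works because the sign bit of `num1 ^ num2` is set exactly
# when the operands' sign bits differ (and `^` binds tighter than `<`):
#
#     different_signs(1, -1)   # True
#     different_signs(1, 1)    # False
#     different_signs(-1, -1)  # False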
| 12 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( ProcessorMixin ):
    '''simple docstring'''

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BridgeTowerImageProcessor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')

    def __init__( self , image_processor , tokenizer ):
        super().__init__(image_processor , tokenizer )

    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
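

# A minimal usage sketch for the processor above (`image` is a placeholder
# PIL image; the checkpoint name is the public BridgeTower base checkpoint):
#
#     from transformers import BridgeTowerProcessor
#     processor = BridgeTowerProcessor.from_pretrained('BridgeTower/bridgetower-base')
#     batch = processor(images=image, text='a photo of a cat', return_tensors='pt')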
| 699 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_graphormer"""] = [
        """GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GraphormerForGraphClassification""",
        """GraphormerModel""",
        """GraphormerPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 13 |
def multiplication_table (number : int, number_of_terms : int ) -> str:
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
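
# Expected output of the demo call above:
#
#     5 * 1 = 5
#     5 * 2 = 10
#     ...
#     5 * 10 = 50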
| 699 | 0 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {'''facebook/bart-base''': BartForConditionalGeneration}
tokenizer_dict = {'''facebook/bart-base''': BartTokenizer}
def parse_args() -> argparse.Namespace:
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
    parser.add_argument(
        '''--validation_file''' ,type=str ,default=None ,help='''A csv or a json file containing the validation data.''' )
    parser.add_argument(
        '''--max_length''' ,type=int ,default=5 ,help='''The maximum total input sequence length after tokenization.''' ,)
    parser.add_argument(
        '''--num_beams''' ,type=int ,default=None ,help=(
            '''Number of beams to use for evaluation. This argument will be '''
            '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
        ) ,)
    parser.add_argument(
        '''--model_name_or_path''' ,type=str ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,required=True ,)
    parser.add_argument(
        '''--config_name''' ,type=str ,default=None ,help='''Pretrained config name or path if not the same as model_name''' ,)
    parser.add_argument(
        '''--device''' ,type=str ,default='''cpu''' ,help='''Device where the model will be run''' ,)
    parser.add_argument('''--output_file_path''' ,type=str ,default=None ,help='''Where to store the final ONNX file.''' )
    args = parser.parse_args()
    return args
def __UpperCAmelCase ( __a : Any ,__a : Dict="cpu" ) -> List[Any]:
"""simple docstring"""
_a : int = model_dict[model_name].from_pretrained(__a ).to(__a )
_a : int = tokenizer_dict[model_name].from_pretrained(__a )
if model_name in ["facebook/bart-base"]:
_a : Dict = 0
_a : Union[str, Any] = None
_a : Optional[int] = 0
return huggingface_model, tokenizer
def export_and_validate_model(model ,tokenizer ,onnx_file_path ,num_beams ,max_length ):
    """simple docstring"""
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = '''My friends are cool but they eat too many carbs.'''
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1_024 ,return_tensors='''pt''' ).to(model.device )
        summary_ids = model.generate(
            inputs['''input_ids'''] ,attention_mask=inputs['''attention_mask'''] ,num_beams=num_beams ,max_length=max_length ,early_stopping=True ,decoder_start_token_id=model.config.decoder_start_token_id ,)
        torch.onnx.export(
            bart_script_model ,(
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) ,onnx_file_path ,opset_version=14 ,input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] ,output_names=['''output_ids'''] ,dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            } ,example_outputs=summary_ids ,)
        logger.info('''Model exported to {}'''.format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None ,{
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(num_beams ),
                '''max_length''': np.array(max_length ),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
            } ,)
        np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1E-3 ,atol=1E-3 )
        logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
        logger.info('''Success.''' )
def main():
    """simple docstring"""
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO ,)
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model , tokenizer = load_model_tokenizer(args.model_name_or_path ,device )
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = '''BART.onnx'''
    logger.info('''Exporting model to ONNX''' )
    export_and_validate_model(model ,tokenizer ,output_name ,num_beams ,max_length )
if __name__ == "__main__":
main()
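
# A typical invocation of this export script (assuming it is saved as
# run_onnx_exporter.py, as in the transformers summarization ONNX example):
#
#     python run_onnx_exporter.py \
#         --model_name_or_path facebook/bart-base \
#         --device cpu \
#         --output_file_path bart.onnx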
| 14 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _UpperCAmelCase ( ModelMixin , ConfigMixin ):
    '''simple docstring'''

    @register_to_config
    def __init__( self , input_dims : int = 128 , targets_length : int = 256 , max_decoder_noise_time : float = 2_000.0 , d_model : int = 768 , num_layers : int = 12 , num_heads : int = 12 , d_kv : int = 64 , d_ff : int = 2048 , dropout_rate : float = 0.1 , ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self , query_input , key_input ):
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )

    def forward( self , encodings_and_masks , decoder_input_tokens , decoder_noise_time ):
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
class DecoderLayer ( nn.Module ):
    '''simple docstring'''

    def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1e-6 ):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )

    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , encoder_decoder_position_bias=None , ):
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond ( nn.Module ):
    '''simple docstring'''

    def __init__( self , d_model , d_kv , num_heads , dropout_rate ):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )

    def forward( self , hidden_states , conditioning_emb=None , attention_mask=None , ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
    def __init__( self : List[str] , d_model : Optional[Any] , d_kv : Dict , num_heads : Optional[int] , dropout_rate : Tuple , layer_norm_epsilon : Optional[Any] ):
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def UpperCamelCase ( self : Optional[Any] , hidden_states : Dict , key_value_states : List[Any]=None , attention_mask : List[str]=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
return layer_output
class TaLayerFFCond ( nn.Module ):
'''simple docstring'''
    def __init__( self : Any , d_model : Optional[Any] , d_ff : Optional[Any] , dropout_rate : Union[str, Any] , layer_norm_epsilon : Any ):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def UpperCamelCase ( self : int , hidden_states : Union[str, Any] , conditioning_emb : Any=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
return hidden_states
class TaDenseGatedActDense ( nn.Module ):
'''simple docstring'''
    def __init__( self : Any , d_model : Any , d_ff : Union[str, Any] , dropout_rate : int ):
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def UpperCamelCase ( self : Optional[Any] , hidden_states : List[Any] ):
        hidden_gelu = self.act(self.wi_0(hidden_states ) )  # gate branch
        hidden_linear = self.wi_1(hidden_states )  # second, distinct input projection
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
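# Illustrative sketch: the module above is T5's gated-GELU feed-forward -- two
# parallel input projections where the GELU of one gates the other elementwise
# before the output projection. Minimal stand-alone version (hypothetical
# class name; wi_0/wi_1/wo follow the usual T5 naming):
import torch
from torch import nn

class _GatedGeluDenseSketch(nn.Module):
    def __init__(self, d_model: int, d_ff: int, dropout_rate: float = 0.1):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)  # gate branch
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)  # linear branch
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        return self.wo(self.dropout(nn.functional.gelu(self.wi_0(x)) * self.wi_1(x)))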
class TaLayerNorm ( nn.Module ):
'''simple docstring'''
    def __init__( self : int , hidden_size : Union[str, Any] , eps : Tuple=1e-6 ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def UpperCamelCase ( self : Optional[int] , hidden_states : int ):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
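# Illustrative sketch of the RMS ("T5-style") layer norm above: scale by the
# reciprocal root mean square -- no mean subtraction, no bias -- with the
# variance accumulated in float32 for half-precision inputs. Hypothetical
# stand-alone helper:
def _rms_norm_sketch(hidden_states, weight, eps=1e-6):
    import torch

    variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
    hidden_states = hidden_states * torch.rsqrt(variance + eps)
    if weight.dtype in (torch.float16, torch.bfloat16):
        hidden_states = hidden_states.to(weight.dtype)
    return weight * hidden_states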
class NewGELUActivation ( nn.Module ):
'''simple docstring'''
    def UpperCamelCase ( self : Any , input : torch.Tensor ):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(input , 3.0 )) ))
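# Illustrative sketch: the activation above is the tanh approximation of GELU,
# 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))). PyTorch >= 1.12
# exposes the same formula as F.gelu(..., approximate="tanh"); a quick check:
def _tanh_gelu_check_sketch():
    import math

    import torch
    import torch.nn.functional as F

    x = torch.linspace(-3.0, 3.0, 7)
    manual = 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x.pow(3.0))))
    assert torch.allclose(manual, F.gelu(x, approximate="tanh"), atol=1e-6)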
class TaFiLMLayer ( nn.Module ):
'''simple docstring'''
    def __init__( self : Dict , in_features : Dict , out_features : int ):
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def UpperCamelCase ( self : Tuple , x : Union[str, Any] , conditioning_emb : List[Any] ):
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
return x
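# Illustrative sketch: FiLM (feature-wise linear modulation), as used above,
# projects a conditioning embedding to per-feature (scale, shift) pairs and
# applies x * (1 + scale) + shift. Minimal stand-alone version (hypothetical
# class name):
import torch
from torch import nn

class _FiLMSketch(nn.Module):
    def __init__(self, cond_features: int, out_features: int):
        super().__init__()
        self.scale_bias = nn.Linear(cond_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        scale, shift = torch.chunk(self.scale_bias(conditioning_emb), 2, dim=-1)
        return x * (1 + scale) + shift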
| 699 | 0 |
import doctest
from collections import deque
import numpy as np
class A :
'''simple docstring'''
def __init__(self : List[str] ) -> None:
"""simple docstring"""
lowercase__ = [2, 1, 2, -1]
lowercase__ = [1, 2, 3, 4]
def lowerCamelCase__ (self : Tuple ) -> list[float]:
"""simple docstring"""
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
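# Illustrative cross-check (added; hypothetical helper): circular convolution
# can also be computed with the FFT identity ifft(fft(a) * fft(b)), which
# should agree with the circulant-matrix approach above after rounding:
def _circular_convolution_fft_sketch(first_signal, second_signal):
    n = max(len(first_signal), len(second_signal))
    a = np.pad(np.asarray(first_signal, dtype=float), (0, n - len(first_signal)))
    b = np.pad(np.asarray(second_signal, dtype=float), (0, n - len(second_signal)))
    return [round(x, 2) for x in np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))]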
| 15 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
    def UpperCamelCase ( self : Optional[int] , images : torch.FloatTensor ):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
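# Illustrative sketch of the two range conversions above (hypothetical helper
# names): diffusion outputs live in [-1, 1], the watermark library expects
# 0-255 pixel values, and the result is mapped back and clamped.
def _to_pixel_range_sketch(images):
    return 255 * (images / 2 + 0.5)  # [-1, 1] -> [0, 255]

def _from_pixel_range_sketch(images):
    return torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)  # back to [-1, 1]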
| 699 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig ( datasets.BuilderConfig ):
    '''simple docstring'''
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__( self : Tuple ):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs ( self : Union[str, Any] ):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
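# Illustrative sketch of the version gating above (hypothetical helper): kwargs
# that only exist in newer pandas are dropped when the installed release is too
# old, so one config can drive pd.read_csv across pandas versions.
def _drop_new_pandas_kwargs_sketch(kwargs):
    if not (datasets.config.PANDAS_VERSION.major >= 2):
        for parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
            kwargs.pop(parameter, None)
    return kwargs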
class _SCREAMING_SNAKE_CASE ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info ( self : int ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators ( self : Any , dl_manager : str ):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
return splits
    def _cast_table ( self : List[Any] , pa_table : pa.Table ):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
return pa_table
    def _generate_tables ( self : Optional[int] , files : Optional[Any] ):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e )}: {e}" )
                raise
| 16 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
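# Illustrative sketch: set_recursively walks a dotted key such as
# "encoder.layers.0.attention.k_proj" down the module tree with getattr before
# copying the tensor in; nn.ModuleList resolves numeric segments like "0" too.
# Hypothetical stand-alone helper:
def _get_by_dotted_key_sketch(module, dotted_key):
    pointer = module
    for attribute in dotted_key.split("."):
        pointer = getattr(pointer, attribute)
    return pointer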
def recursively_load_weights(fairseq_model, hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*', layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                    continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
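# Illustrative sketch: load_conv_layer parses fairseq parameter names such as
# "feature_extractor.conv_layers.0.2.weight" -- the first index selects the
# conv block, the second distinguishes the conv (0) from its layer norm (2):
def _parse_conv_layer_name_sketch(full_name):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    return int(items[0]), int(items[1])  # (layer_id, type_id)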
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 699 | 0 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester ( unittest.TestCase ):
    def __init__( self : int , parent : int , batch_size : Optional[Any]=13 , seq_length : Any=7 , is_training : Any=True , use_attention_mask : List[Any]=True , use_token_type_ids : str=True , use_labels : int=True , vocab_size : Optional[Any]=99 , hidden_size : Dict=32 , num_hidden_layers : int=5 , num_attention_heads : Optional[int]=4 , intermediate_size : Dict=37 , hidden_act : List[str]="gelu" , hidden_dropout_prob : str=0.1 , attention_probs_dropout_prob : int=0.1 , max_position_embeddings : Optional[int]=512 , type_vocab_size : Optional[int]=16 , type_sequence_label_size : List[str]=2 , initializer_range : List[Any]=0.02 , num_choices : int=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs ( self : List[Any] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common ( self : int ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
    def prepare_config_and_inputs_for_decoder ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCamelCase_ ( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp ( self : List[str] ):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
@slow
    def test_model_from_pretrained ( self : Any ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
@slow
    def test_inference_masked_lm ( self : int ):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True )
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_no_head ( self : Any ):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=True )
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
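# Illustrative sketch: the integration tests above pin a pretrained model down
# by comparing a small output slice against stored golden values with a loose
# tolerance -- cheap to store yet sensitive to numerical regressions:
def _golden_slice_check_sketch(output, expected_slice, atol=1e-4):
    assert np.allclose(output[:, :3, :3], expected_slice, atol=atol)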
| 17 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase = TypeVar("T")
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : T ):
A = data
A = None
def __str__( self : Optional[int] ):
return f'''{self.data}'''
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple ):
        self.top = None
def __iter__( self : int ):
        node = self.top
        while node:
            yield node.data
            node = node.next
def __str__( self : Any ):
return "->".join([str(UpperCamelCase__ ) for item in self] )
def __len__( self : Dict ):
return len(tuple(iter(self ) ) )
    def is_empty ( self : List[str] ):
return self.top is None
    def push ( self : Dict , item : T ):
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop ( self : Dict ):
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek ( self : List[str] ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
    def clear ( self : List[str] ):
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
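# Illustrative usage sketch (hypothetical helper; exercises the linked stack
# above -- values come back in LIFO order):
def _linked_stack_demo_sketch():
    stack = _UpperCAmelCase[int]()
    for value in (1, 2, 3):
        stack.push(value)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3
    assert stack.peek() == 2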
| 699 | 0 |
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")
METRIC_CONVERSION = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 10_00),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0454, 264.172),
"cubicyard": from_to(0.7_6455, 1.3_0795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.0_0023_6588, 4226.75),
}
def __a(value : float , from_type : str , to_type : str ):
'''simple docstring'''
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ", ".join(SCREAMING_SNAKE_CASE_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ", ".join(SCREAMING_SNAKE_CASE_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
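# Illustrative worked example (hypothetical helper): the conversion pivots
# through cubic metres -- `from_` maps the source unit to cubic metres and
# `to` maps cubic metres to the target, e.g. 1500 litres to kilolitres:
def _volume_conversion_demo_sketch():
    value, litre_from, kilolitre_to = 1500, 0.001, 1
    return value * litre_from * kilolitre_to  # 1500 * 0.001 * 1 = 1.5 kL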
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
from __future__ import annotations
import math
def minimax(depth : int, node_index : int, is_max : bool, scores : list[int], height : float ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
return (
max(
            minimax(depth + 1, node_index * 2, False, scores, height ), minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
if is_max
else min(
            minimax(depth + 1, node_index * 2, True, scores, height ), minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )
)
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores ), 2 )
    print(f'''Optimal value : {minimax(0, 0, True, scores, height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 699 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase( SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config ( self , **__a) -> Union[str, Any]:
        '''simple docstring'''
        config = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**__a)
return config
    def test_timesteps ( self) -> Dict:
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas ( self) -> int:
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)
    def test_schedules ( self) -> Tuple:
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type ( self) -> List[str]:
        '''simple docstring'''
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample ( self) -> Dict:
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding ( self) -> Optional[int]:
        '''simple docstring'''
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type ( self) -> Optional[int]:
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices ( self) -> Optional[Any]:
        '''simple docstring'''
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance ( self) -> Tuple:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise ( self) -> str:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction ( self) -> str:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample , t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps ( self) -> Any:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t)
    def test_custom_timesteps_increasing_order ( self) -> Dict:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.'''):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps ( self) -> List[str]:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.'''):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps)
    def test_custom_timesteps_too_large ( self) -> Tuple:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
            scheduler.set_timesteps(timesteps=timesteps)
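# Illustrative sketch: the hard-coded variance checks earlier (~0 at t=0,
# ~0.00979 at t=487, ~0.02 at t=999) follow from the DDPM posterior variance
# beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) under the linear beta
# schedule in the config above. Hypothetical stand-alone computation:
def _ddpm_posterior_variance_sketch(t, num_steps=1000, beta_start=0.0001, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_steps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return betas[t] * (1 - alpha_bar_prev) / (1 - alphas_cumprod[t])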
| 19 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file : str, eval_file : str, test_file : str, tokenizer : PreTrainedTokenizer, label_column_id : int, max_seq_length : Optional[int] = None, ) -> Dict:
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv', data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    labelaid = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length' ), batched=True, )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding='max_length', ), batched=True, )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = labelaid[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
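# Illustrative sketch of the bridge above (hypothetical names/shapes): a Python
# generator yielding (features, label) pairs is wrapped into a tf.data.Dataset
# via from_generator with explicit dtypes and shapes:
def _generator_to_tf_dataset_sketch(examples, input_names):
    def gen():
        for ex in examples:
            yield {k: ex[k] for k in input_names}, ex["label"]

    return tf.data.Dataset.from_generator(
        gen,
        ({k: tf.int32 for k in input_names}, tf.int64),
        ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
    )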
logger = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = field(metadata={'''help''': '''Which column contains the label'''} )
SCREAMING_SNAKE_CASE : str = field(default=__lowercase , metadata={'''help''': '''The path of the training file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the development file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the test file'''} )
SCREAMING_SNAKE_CASE : int = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : bool = field(default=__lowercase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def main() -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_ds , val_ds , test_ds , labelaid = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(labelaid ), label2id=labelaid, id2label={id: label for label, id in labelaid.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions, axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_ds, eval_dataset=val_ds, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt' )
        with open(output_eval_file, 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key, value in result.items():
                logger.info(f''' {key} = {value}''' )
                writer.write(f'''{key} = {value}\n''' )
        results.update(result )
return results
if __name__ == "__main__":
main()
| 699 | 0 |