| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 - 54.5k chars) | int64 (0 - 371) | string (87 - 49.2k chars) | int64 (0 - 349) | int64 (0 - 1) |
from typing import TYPE_CHECKING

from ....utils import _LazyModule

_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
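# Illustrative usage note (added; not part of the original module): with the lazy
# module in place, `from transformers import TapexTokenizer` only imports
# `tokenization_tapex` the first time the attribute is actually accessed, e.g.:
#     tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")  # checkpoint name shown for illustration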
| 9 |
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = """<HTML>
    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>
    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>
    <h1>My First Heading</h1>
    <p>My first paragraph.</p>
    </body>
    </html>
    """

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
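# A minimal usage sketch outside the test harness (added; assumes bs4 and
# transformers are installed, mirroring the checkpoint-free constructor above):
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><h1>Hello</h1></body></html>")
#     encoding.nodes   # [["Hello"]]
#     encoding.xpaths  # [["/html/body/h1"]]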
| 9 | 1 |
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-indexed position in the alphabet."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Map 1-indexed alphabet positions back to a lowercase string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
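# Quick sanity check (doctest-style, not in the original file):
#     >>> encode("abc")
#     [1, 2, 3]
#     >>> decode([8, 5, 12, 12, 15])
#     'hello'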
| 113 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
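# Example runs (added; output is stochastic, so only the order of magnitude of the
# error is meaningful):
#     pi_estimator(100_000)                                         # prints an estimate close to 3.14159
#     area_under_curve_estimator(100_000, lambda x: x * x, 0.0, 1.0)  # ~ 1/3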
| 113 | 1 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Solve P = V * I: given two of voltage, current and power (with the unknown
    passed as 0), return the name and value of the missing quantity.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
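# Examples (added, doctest-style): solving P = V * I for the missing quantity.
#     >>> electric_power(voltage=0, current=2, power=5)
#     result(name='voltage', value=2.5)
#     >>> electric_power(voltage=2, current=2, power=0)
#     result(name='power', value=4.0)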
| 253 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
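# Example invocation (added; argument values are illustrative placeholders, not
# verified checkpoint names):
#     python convert_rwkv_checkpoint_to_hf.py \
#         --repo_id <hub-repo-with-rwkv-weights> \
#         --checkpoint_file <checkpoint>.pth \
#         --output_dir ./rwkv-converted --size 169M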
| 253 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
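# Typical entry point (added sketch; in practice this formatter is selected through
# the `datasets` formatting API rather than instantiated directly):
#     from datasets import load_dataset
#     ds = load_dataset("glue", "mrpc", split="train")
#     ds = ds.with_format("jax")   # rows now come back as jax.Array values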
| 354 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
) -> torch.Tensor:
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
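# Usage sketch (added; the class reconstructed above corresponds to diffusers'
# Heun discrete scheduler):
#     scheduler = HeunDiscreteScheduler(beta_start=0.00085, beta_end=0.012)
#     scheduler.set_timesteps(num_inference_steps=25)
#     # each denoising iteration then calls scheduler.scale_model_input(...) and
#     # scheduler.step(model_output, t, sample).prev_sample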
| 307 | 0 |
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 5 |
import contextlib
import os
import sqlite3

import pytest

from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy


def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
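# Note (added for context): `tmp_path` is the standard pytest fixture, while
# `sqlite_path` and `set_sqlalchemy_silence_uber_warning` are fixtures supplied by
# the test suite's conftest; `sqlite_path` points at a small sqlite database with a
# 4-row `dataset` table whose columns col_1/col_2/col_3 are what
# `_check_sql_dataset` asserts against.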
| 20 | 0 |
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
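# Quick check with the blinker, a period-2 oscillator (added, doctest-style):
#     >>> new_generation(BLINKER)
#     [[0, 0, 0], [1, 1, 1], [0, 0, 0]]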
| 4 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
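# Usage sketch (added; assumes transformers is installed):
#     config = TableTransformerConfig(num_queries=50)
#     config.hidden_size            # 256, aliased to d_model via attribute_map
#     config.num_attention_heads    # 8, aliased to encoder_attention_heads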
| 4 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 230 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
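# Usage sketch (added; the class name is reconstructed: this pad-to-multiple-of-8
# processor matches transformers' Swin2SR image processor):
#     processor = Swin2SRImageProcessor()
#     batch = processor(images=image, return_tensors="pt")
#     # height/width are padded up past the next multiple of pad_size; note the pad
#     # is strictly positive, so an exact multiple still gains a full block of 8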
| 121 | 0 |
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and return the sum's numerator and denominator in lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
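# Helper sanity checks (added, doctest-style):
#     >>> is_sq(25), is_sq(26)
#     (True, False)
#     >>> add_three(1, 2, 1, 3, 1, 6)   # 1/2 + 1/3 + 1/6 == 1/1
#     (1, 1)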
| 367 |
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plots = [
        (young, "Young"),
        (middle_aged, "Middle aged"),
        (union, "union"),
        (intersection, "intersection"),
        (complement_a, "complement_a"),
        (difference, "difference a/b"),
        (alg_sum, "alg_sum"),
        (alg_product, "alg_product"),
        (bdd_sum, "bdd_sum"),
        (bdd_difference, "bdd_difference"),
    ]
    plt.figure()
    for index, (membership, title) in enumerate(plots, start=1):
        plt.subplot(4, 3, index)
        plt.plot(X, membership)
        plt.title(title)
        plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 294 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 113 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__UpperCamelCase = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 65536,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 48000,
'''sample_size''': 131072,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 16000,
'''sample_size''': 65536,
},
}
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
return torch.atana(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) / math.pi * 2
def lowercase (SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE = torch.sin(t * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        # scramble=True is assumed here; the original flag value was lost in extraction
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
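# The maps above translate the flat layer indices of the original audio-diffusion
# checkpoint into diffusers' named sub-modules: indices below 7 live in down blocks,
# indices above 7 in up blocks, and the full 1-13 range only occurs in the mid block
# (see the branch structure in rename() below).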
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        tripled_shape = v.shape[0]
        single_shape = tripled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
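# Illustrative check (assumed shapes, not part of the conversion itself): a fused qkv conv
# weight of shape (3 * hidden, hidden, 1) is split into three (hidden, hidden) linear
# weights -- one each for query, key and value -- by slicing the first axis into thirds
# and dropping the trailing conv dimension.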
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
main(args)
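# Typical invocation (paths illustrative):
#   python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers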
| 113 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging


logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
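# Note: dataclasses forbid mutable defaults such as [] directly, so list_field wraps the
# default in a factory. For example, `batch_sizes: List[int] = list_field(default=[8])`
# gives every instance its own fresh list instead of sharing one mutable object.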
@dataclass
class BenchmarkArguments:
    """
    Base benchmark arguments. The class, field and boolean-default names follow the
    upstream `transformers` benchmark utilities; the original identifiers were lost in
    extraction (inference/cuda/tpu/speed/memory/multi_process default to True, the rest
    to False).
    """

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']`."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:  # `is_tpu` is provided by the framework-specific subclasses
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
| 28 |
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Finds the Maclaurin approximation of sin(theta) using the first `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
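# Range reduction above keeps |theta| small before summing the series: sin and cos are
# 2*pi-periodic, so subtracting the nearest multiple of 2*pi loses nothing while keeping
# the power terms from growing needlessly. For instance, maclaurin_sin(10) first reduces
# theta to roughly 10 - 2*pi ≈ 3.717.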
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Finds the Maclaurin approximation of cos(theta) using the first `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 28 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    # the two flags below disable mixin checks that don't apply to this audio pipeline;
    # the exact attribute names follow the upstream diffusers test and are assumptions
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        # flip_sin_to_cos=True and use_timestep_embedding=False are assumed per the upstream test
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)

        audio = output.audios
        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 19 |
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 307 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
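# Minimal usage sketch: `MarkupLMConfig()` instantiates the base-sized configuration above,
# while `MarkupLMConfig.from_pretrained("microsoft/markuplm-base")` pulls the published one
# referenced in MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP.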
| 49 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase_ ={"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_,  # the expected-encoding dict above keeps its extracted name
            model_name="google/bert_for_seq_generation_L-24_bbc_encoder",
            revision="c817d1fd1be2ffa69431227a1fe320544943d4db",
        )
| 49 | 1 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Generates the next generation for a given state of Conway's Game of Life."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
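# Sanity sketch: applying new_generation to BLINKER (a vertical bar of three live cells)
# yields the horizontal bar [[0, 0, 0], [1, 1, 1], [0, 0, 0]], and a second application
# restores the original column, so the blinker oscillates with period 2 as expected.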
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generates a list of images of subsequent Game of Life states."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
__snake_case =generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 4 |
'''simple docstring'''
def get_demo_graph(index):
    # name assumed for this helper; it returns one of four small demo adjacency lists
    return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """
    Returns the list of bridges (edges whose removal disconnects the graph) using a
    depth-first search over Tarjan-style low-link values.
    """
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
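# Example (using the demo graphs above): compute_bridges(get_demo_graph(0)) reports the
# cut edges hanging between the two triangles, i.e. (2, 3), (3, 4) and (2, 5), while the
# fully cyclic fourth graph, get_demo_graph(3), has no bridges at all.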
if __name__ == "__main__":
import doctest
doctest.testmod()
| 4 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Dict = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"""{prefix}cls_token""", '''beit.embeddings.cls_token'''),
(F"""{prefix}patch_embed.proj.weight""", '''beit.embeddings.patch_embeddings.projection.weight'''),
(F"""{prefix}patch_embed.proj.bias""", '''beit.embeddings.patch_embeddings.projection.bias'''),
(F"""{prefix}pos_embed""", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    # target key names follow the upstream DiT/BEiT conversion script
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    # define default BEiT configuration; use_absolute_position_embeddings=True is assumed
    # per the upstream conversion script
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
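# Typical invocation (script name illustrative):
#   python convert_dit_to_pytorch.py --checkpoint_url <url> --pytorch_dump_folder_path ./dit-base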
| 7 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
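# Design note: PyTorch stores Linear weights as (out_features, in_features) while the
# original TF BERT checkpoints use (in_features, out_features), hence the transpose for
# every name listed in tensors_to_transpose before the value is written into the graph.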
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
| 7 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 159 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class UpperCamelCase ( snake_case_ ):
UpperCamelCase : int = '''mvp'''
UpperCamelCase : Union[str, Any] = ['''past_key_values''']
UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCAmelCase__ : List[str]=50267 , UpperCAmelCase__ : Optional[Any]=1024 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[Any]=4096 , UpperCAmelCase__ : int=16 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=4096 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Tuple=0.0_2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=100 , UpperCAmelCase__ : Union[str, Any]=800 , **UpperCAmelCase__ : Dict , ) -> List[Any]:
_a : Any = vocab_size
_a : Any = max_position_embeddings
_a : Union[str, Any] = d_model
_a : List[str] = encoder_ffn_dim
_a : List[Any] = encoder_layers
_a : Dict = encoder_attention_heads
_a : Tuple = decoder_ffn_dim
_a : List[Any] = decoder_layers
_a : Optional[Any] = decoder_attention_heads
_a : Optional[Any] = dropout
_a : str = attention_dropout
_a : Dict = activation_dropout
_a : Any = activation_function
_a : Tuple = init_std
_a : Dict = encoder_layerdrop
_a : Optional[int] = decoder_layerdrop
_a : Optional[Any] = classifier_dropout
_a : List[Any] = use_cache
_a : Dict = encoder_layers
_a : str = scale_embedding # scale factor will be sqrt(d_model) if True
_a : int = use_prompt
_a : Dict = prompt_length
_a : Dict = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ):
_a : List[str] = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
| 294 | 0 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase__( __A ):
lowerCAmelCase__ : Any = ['pixel_values']
def __init__( self ,__UpperCAmelCase = True ,__UpperCAmelCase = 1 / 2_55 ,__UpperCAmelCase = True ,__UpperCAmelCase = 8 ,**__UpperCAmelCase ,) -> None:
super().__init__(**__UpperCAmelCase )
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
A__ = pad_size
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,**__UpperCAmelCase ) -> np.ndarray:
return rescale(__UpperCAmelCase ,scale=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> str:
A__ , A__ = get_image_size(__UpperCAmelCase )
A__ = (old_height // size + 1) * size - old_height
A__ = (old_width // size + 1) * size - old_width
return pad(__UpperCAmelCase ,((0, pad_height), (0, pad_width)) ,mode='symmetric' ,data_format=__UpperCAmelCase )
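    # Worked example (illustrative): with pad_size = 8, a 510 x 511 input is padded on the
    # bottom/right edges to 512 x 512. Note the formula always adds at least one row and
    # column, so an already-aligned 512 x 512 image would grow to 520 x 520.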
    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 356 |
"""simple docstring"""
import os

import jsonlines
import numpy as np
from tqdm import tqdm


# Constant names below are assumed; only their values survived extraction.
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True  # flag key name assumed; the original was lost in extraction
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
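# Shape note: the dict returned above always carries list-valued "start_token",
# "end_token" and "text" fields plus a "category" in {yes, no, short, long} and the
# removal flag, which is exactly what the context/stride helpers below rely on.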
def get_context_and_ans(example, assertion=False):
    """Gives new context after removing <html> tokens & new answer tokens as per new context."""
    answer = _get_single_answer(example)

    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=2_048 , UpperCamelCase__=4_096 , UpperCamelCase__=False ):
"""simple docstring"""
A__ = get_strided_contexts_and_ans(
UpperCamelCase__ , UpperCamelCase__ , doc_stride=UpperCamelCase__ , max_length=UpperCamelCase__ , assertion=UpperCamelCase__ , )
return example
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
with jsonlines.open(UpperCamelCase__ , 'a' ) as writer:
for example in tqdm(UpperCamelCase__ , total=len(UpperCamelCase__ ) , desc='Saving samples ... ' ):
A__ = example['labels']
for ids, start, end, cat in zip(
example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ):
                if start == -1 and end == -1:
                    continue  # skip samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60 % of the null-category samples
writer.write(
{
'input_ids': ids,
'start_token': start,
'end_token': end,
'category': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__lowerCamelCase = load_dataset("natural_questions")
__lowerCamelCase = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
__lowerCamelCase = data["train" if PROCESS_TRAIN == "true" else "validation"]
__lowerCamelCase = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
__lowerCamelCase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__lowerCamelCase = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
__lowerCamelCase = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 154 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
_lowerCamelCase : List[str] = 5_0000
_lowerCamelCase : Optional[int] = 5000
_lowerCamelCase ,_lowerCamelCase : int = os.path.split(__file__)
_lowerCamelCase : str = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
for i in range(A__ ):
UpperCamelCase = dataset[i]
@get_duration
def __lowerCamelCase ( A__ , A__ , A__ ) -> int:
"""simple docstring"""
for i in range(0 , len(A__ ) , A__ ):
UpperCamelCase = dataset[i : i + batch_size]
@get_duration
def __lowerCamelCase ( A__ , A__ , A__ ) -> List[Any]:
"""simple docstring"""
with dataset.formatted_as(type=A__ ):
for i in range(A__ ):
UpperCamelCase = dataset[i]
@get_duration
def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> int:
"""simple docstring"""
with dataset.formatted_as(type=A__ ):
for i in range(0 , A__ , A__ ):
UpperCamelCase = dataset[i : i + batch_size]
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
UpperCamelCase = {'num examples': SPEED_TEST_N_EXAMPLES}
UpperCamelCase = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
]
UpperCamelCase = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('generating dataset' )
UpperCamelCase = datasets.Features(
{'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} )
UpperCamelCase = generate_example_dataset(
os.path.join(A__ , 'dataset.arrow' ) , A__ , num_examples=A__ , seq_shapes={'list': (100,)} , )
print('first set of iterations' )
for func, kwargs in functions:
print(func.__name__ , str(A__ ) )
UpperCamelCase = func(A__ , **A__ )
print('shuffling dataset' )
UpperCamelCase = dataset.shuffle()
        print('Second set of iterations (after shuffling)' )
for func, kwargs in functions_shuffled:
print('shuffled ' , func.__name__ , str(A__ ) )
UpperCamelCase = func(
A__ , **A__ )
with open(A__ , 'wb' ) as f:
f.write(json.dumps(A__ ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
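# A minimal sketch of what the `get_duration` decorator imported from `utils`
# is assumed to do (the real helper may differ): time the wrapped call and
# return the elapsed seconds, which is what ends up in RESULTS above.
import functools
import time


def _get_duration_sketch(func):  # hypothetical stand-in, not the real helper
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # seconds elapsed

    return wrapper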
| 28 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_lowerCamelCase : Optional[int] = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
_lowerCamelCase : Union[str, Any] = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
_lowerCamelCase : Dict = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
_lowerCamelCase : Dict = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
_lowerCamelCase : Optional[Any] = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
_lowerCamelCase : List[Any] = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
_lowerCamelCase : List[str] = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = randrange(len(A__ ) ), randrange(len(A__ ) )
UpperCamelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowerCamelCase ( A__ = 100 ) -> Optional[Any]:
"""simple docstring"""
return (generate_random_hand() for _ in range(A__ ))
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
assert PokerHand(A__ )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
assert PokerHand(A__ )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , A__ )
def __lowerCamelCase ( A__ , A__ , A__ ) -> str:
"""simple docstring"""
UpperCamelCase = PokerHand(A__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Dict:
"""simple docstring"""
assert PokerHand(A__ )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> str:
"""simple docstring"""
assert PokerHand(A__ )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , A__ )
def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple:
"""simple docstring"""
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowerCamelCase ( A__ , A__ , A__ ) -> List[str]:
"""simple docstring"""
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
UpperCamelCase = [PokerHand(A__ ) for hand in SORTED_HANDS]
UpperCamelCase = poker_hands.copy()
shuffle(A__ )
UpperCamelCase = chain(sorted(A__ ) )
for index, hand in enumerate(A__ ):
assert hand == poker_hands[index]
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
# Test that five high straights are compared correctly.
UpperCamelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=A__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
UpperCamelCase = PokerHand('2C 4S AS 3D 5C' )
UpperCamelCase = True
UpperCamelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
UpperCamelCase = 0
UpperCamelCase = os.path.abspath(os.path.dirname(A__ ) )
UpperCamelCase = os.path.join(A__ , 'poker_hands.txt' )
with open(A__ ) as file_hand:
for line in file_hand:
UpperCamelCase = line[:14].strip()
UpperCamelCase = line[15:].strip()
UpperCamelCase , UpperCamelCase = PokerHand(A__ ), PokerHand(A__ )
UpperCamelCase = player.compare_with(A__ )
if output == "Win":
answer += 1
assert answer == 376
| 28 | 1 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
def lowerCamelCase__ ( A__ : type , A__ : Optional[str] , A__ : Optional[List[str]] = None , ):
'''simple docstring'''
__lowerCamelCase = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
__lowerCamelCase = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
__lowerCamelCase = format_type
def lowerCamelCase__ ( A__ : Exception , A__ : Optional[str] , A__ : Optional[List[str]] = None ):
'''simple docstring'''
__lowerCamelCase = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__lowerCamelCase = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
UpperCAmelCase_ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
UpperCAmelCase_ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
UpperCAmelCase_ = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def lowerCamelCase__ ( A__ : Optional[str] ):
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def lowerCamelCase__ ( A__ : Optional[str] , **A__ : Dict ):
'''simple docstring'''
__lowerCamelCase = get_format_type_from_alias(A__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**A__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
            f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'' )
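# Usage sketch (illustrative; assumes numpy is installed):
# >>> type(get_formatter("np")).__name__   # alias "np" resolves to "numpy"
# 'NumpyFormatter'
# An unknown format type raises the ValueError constructed above.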
| 368 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class lowerCamelCase__( folder_based_builder.FolderBasedBuilderConfig):
UpperCAmelCase__ : bool = None
UpperCAmelCase__ : bool = None
class lowerCamelCase__( folder_based_builder.FolderBasedBuilder):
UpperCAmelCase__ : List[Any] = datasets.Audio()
UpperCAmelCase__ : str = 'audio'
UpperCAmelCase__ : Union[str, Any] = AudioFolderConfig
UpperCAmelCase__ : List[str] # definition at the bottom of the script
UpperCAmelCase__ : Optional[int] = AudioClassification(audio_column='audio' , label_column='label')
UpperCAmelCase_ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
UpperCAmelCase_ = AUDIO_EXTENSIONS
| 29 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__snake_case :Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
def __init__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
self.check_model_type(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a , __a = {}, {}
if padding is not None:
__a = padding
if truncation is not None:
__a = truncation
if top_k is not None:
__a = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : str , __SCREAMING_SNAKE_CASE : Union["Image.Image", str] , __SCREAMING_SNAKE_CASE : str = None , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , (Image.Image, str)) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = {'''image''': image, '''question''': question}
else:
__a = image
__a = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
return results
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Any=False):
'''simple docstring'''
__a = load_image(inputs['''image'''])
__a = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE)
__a = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
model_inputs.update(__SCREAMING_SNAKE_CASE)
return model_inputs
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = self.model(**__SCREAMING_SNAKE_CASE)
return model_outputs
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str]=5):
'''simple docstring'''
if top_k > self.model.config.num_labels:
__a = self.model.config.num_labels
if self.framework == "pt":
__a = model_outputs.logits.sigmoid()[0]
__a , __a = probs.topk(__SCREAMING_SNAKE_CASE)
else:
raise ValueError(F'Unsupported framework: {self.framework}')
__a = scores.tolist()
__a = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)]
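# Hypothetical usage sketch (image name, model choice and scores are
# illustrative, not prescriptive):
# >>> from transformers import pipeline
# >>> vqa = pipeline("visual-question-answering")
# >>> vqa(image="cats.jpg", question="How many cats are there?", top_k=2)
# [{'score': 0.98, 'answer': '2'}, {'score': 0.01, 'answer': '1'}]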
| 49 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Initialise PyTorch model
__a = LxmertConfig.from_json_file(_UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
__a = LxmertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
__snake_case :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__snake_case :Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
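# Example invocation (script name and paths are placeholders):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin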
| 49 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCAmelCase = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 361 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_UpperCAmelCase = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def UpperCamelCase ( __lowercase : Any ,__lowercase : str ):
'''simple docstring'''
return (abs(source - target ) / target) < 0.01
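# e.g. abs(2_589_000 - 2_589_981) / 2_589_981 ~= 0.0004 < 0.01, so the measured
# num_bytes below may drift by up to 1 % and still pass.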
@pytest.mark.integration
def UpperCamelCase ( __lowercase : Dict ):
'''simple docstring'''
A_ : Optional[Any] = _TestCommandArgs(dataset=__lowercase ,all_configs=__lowercase ,save_infos=__lowercase )
A_ : List[Any] = TestCommand(*__lowercase )
test_command.run()
A_ : Any = os.path.join(__lowercase ,'README.md' )
assert os.path.exists(__lowercase )
A_ : Tuple = DatasetInfosDict.from_directory(__lowercase )
A_ : Any = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) ,splits=[
{
'name': 'train',
'num_bytes': 2_35_15_63,
'num_examples': 1_00_00,
},
{
'name': 'validation',
'num_bytes': 23_84_18,
'num_examples': 10_00,
},
] ,download_size=3_94_06_80 ,dataset_size=2_58_99_81 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
A_ , A_ : Union[str, Any] = getattr(dataset_infos['default'] ,__lowercase ), getattr(expected_dataset_infos['default'] ,__lowercase )
if key == "num_bytes":
assert is_apercent_close(__lowercase ,__lowercase )
elif key == "splits":
assert list(__lowercase ) == list(__lowercase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
            assert result == expected
| 192 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ) -> str:
'''simple docstring'''
A__ = 'backbone.' if is_semantic else ''
A__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', 'beit.embeddings.cls_token'),
(f'{prefix}patch_embed.proj.weight', 'beit.embeddings.patch_embeddings.projection.weight'),
(f'{prefix}patch_embed.proj.bias', 'beit.embeddings.patch_embeddings.projection.bias'),
(f'{prefix}pos_embed', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=False ) -> List[str]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
A__ = 'backbone.' if is_semantic else ''
# queries, keys and values
A__ = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' )
A__ = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' )
A__ = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' )
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = q_bias
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
A__ = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' )
A__ = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' )
A__ = gamma_a
A__ = gamma_a
def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict:
'''simple docstring'''
A__ = dct.pop(SCREAMING_SNAKE_CASE__ )
A__ = val
def _snake_case( ) -> str:
'''simple docstring'''
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> str:
'''simple docstring'''
A__ = False if 'rvlcdip' in checkpoint_url else True
A__ = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE__ , use_mask_token=SCREAMING_SNAKE_CASE__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A__ = 1024
A__ = 4096
A__ = 24
A__ = 16
# labels
if "rvlcdip" in checkpoint_url:
A__ = 16
A__ = 'huggingface/label-files'
A__ = 'rvlcdip-id2label.json'
A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' )['model']
A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
# load HuggingFace model
A__ = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE__ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# Check outputs on an image
A__ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE__ )
A__ = prepare_img()
A__ = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' )
A__ = encoding['pixel_values']
A__ = model(SCREAMING_SNAKE_CASE__ )
A__ = outputs.logits
# verify logits
A__ = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE__ ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
if has_lm_head:
A__ = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
A__ = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
lowercase_ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 7 |
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
A__ = 0
A__ = len(SCREAMING_SNAKE_CASE__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
A__ = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
A__ = left
A__ = point
elif point > right:
A__ = right
A__ = point
else:
if item < current_item:
A__ = point - 1
else:
A__ = point + 1
return None
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif point > right:
return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point - 1 )
else:
return interpolation_search_by_recursion(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point + 1 , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
'''simple docstring'''
if collection != sorted(SCREAMING_SNAKE_CASE__ ):
raise ValueError('Collection must be ascending sorted' )
return True
if __name__ == "__main__":
import sys
lowercase_ = 0
if debug == 1:
lowercase_ = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
lowercase_ = 67
lowercase_ = interpolation_search(collection, target)
if result is not None:
        print(f"""{target} found at position: {result}""")
else:
print("Not found")
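# Worked example against the debug collection above, searching for 45 in
# [10, 30, 40, 45, 50, 66, 77, 93]:
#   probe 1: point = 0 + (45 - 10) * (7 - 0) // (93 - 10) = 2 -> 40 < 45, so left = 3
#   probe 2: point = 3 + (45 - 45) * (7 - 3) // (93 - 45) = 3 -> 45 found
# so interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 45) returns 3.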
| 7 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def UpperCamelCase ( ):
print("Making key files..." )
make_key_files("rsa" , 10_24 )
print("Key files generation successful." )
def UpperCamelCase ( _lowerCamelCase : int ):
print("Generating prime p..." )
A__ = rabinMiller.generate_large_prime(_lowerCamelCase )
print("Generating prime q..." )
A__ = rabinMiller.generate_large_prime(_lowerCamelCase )
A__ = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
A__ = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_lowerCamelCase , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
A__ = cryptoMath.find_mod_inverse(_lowerCamelCase , (p - 1) * (q - 1) )
A__ = (n, e)
A__ = (n, d)
return (public_key, private_key)
def UpperCamelCase ( _lowerCamelCase : str , _lowerCamelCase : int ):
if os.path.exists(F"{name}_pubkey.txt" ) or os.path.exists(F"{name}_privkey.txt" ):
print("\nWARNING:" )
print(
F"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
"Use a different name or delete these files and re-run this program." )
sys.exit()
A__, A__ = generate_key(_lowerCamelCase )
print(F"\nWriting public key to file {name}_pubkey.txt..." )
with open(F"{name}_pubkey.txt" , "w" ) as out_file:
out_file.write(F"{key_size},{public_key[0]},{public_key[1]}" )
print(F"Writing private key to file {name}_privkey.txt..." )
with open(F"{name}_privkey.txt" , "w" ) as out_file:
out_file.write(F"{key_size},{private_key[0]},{private_key[1]}" )
if __name__ == "__main__":
main()
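# The key files written above are plain text in the form "key_size,n,exponent".
# A minimal companion sketch for reading one back (hypothetical helper, not
# part of the original module):
def read_key_file(path: str) -> tuple[int, int, int]:
    with open(path) as f:
        key_size, n, exponent = f.read().split(",")
    return int(key_size), int(n), int(exponent)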
| 123 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def UpperCamelCase ( _lowerCamelCase : int , _lowerCamelCase : int = 2 , _lowerCamelCase : int = 1 , _lowerCamelCase : int = 3 , ):
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ) -> int:
return (pow(_lowerCamelCase , 2 ) + step) % modulus
for _ in range(_lowerCamelCase ):
# These track the position within the cycle detection logic.
A__ = seed
A__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
A__ = rand_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A__ = rand_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A__ = rand_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
A__ = gcd(hare - tortoise , _lowerCamelCase )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
A__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
__lowerCAmelCase : Optional[int] =argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
__lowerCAmelCase : Optional[int] =parser.parse_args()
__lowerCAmelCase : Dict =pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"""{args.num} is probably prime""")
else:
__lowerCAmelCase : Optional[Any] =args.num // divisor
print(f"""{args.num} = {divisor} * {quotient}""")
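# Sanity checks grounded in the implementation above:
#   pollard_rho(100)     -> 2     (even inputs > 2 take the early shortcut)
#   pollard_rho(17)      -> None  (for a prime, the gcd is only ever 1 or num)
#   pollard_rho(83 * 97) -> 83 or 97, usually within the default 3 attempts
#                           (the exact factor depends on the seed/step schedule)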
| 123 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
a__ : Optional[Any] = {
'169M': 1_2,
'430M': 2_4,
'1B5': 2_4,
'3B': 3_2,
'7B': 3_2,
'14B': 4_0,
}
a__ : str = {
'169M': 7_6_8,
'430M': 1_0_2_4,
'1B5': 2_0_4_8,
'3B': 2_5_6_0,
'7B': 4_0_9_6,
'14B': 5_1_2_0,
}
def snake_case ( UpperCAmelCase )-> Dict:
"""simple docstring"""
__A = list(state_dict.keys() )
for name in state_dict_keys:
__A = state_dict.pop(_A )
# emb -> embedding
if name.startswith('emb.' ):
__A = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
__A = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
__A = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , _A )
# ffn -> feed_forward
__A = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , _A )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
__A = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
__A = name.replace('.time_mix_v' , '.time_mix_value' )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
__A = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
__A = 'rwkv.' + name
__A = weight
return state_dict
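# Illustration of the renaming rules above (example checkpoint keys, mapping
# derived from the code):
#   'emb.weight'              -> 'rwkv.embeddings.weight'
#   'blocks.0.ln0.weight'     -> 'rwkv.blocks.0.pre_ln.weight'
#   'blocks.3.att.time_mix_k' -> 'rwkv.blocks.3.attention.time_mix_key'
#   'head.weight'             -> 'head.weight'  (the only key left unprefixed)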
def snake_case ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=None )-> Any:
"""simple docstring"""
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
__A = 5_0_2_7_7
__A = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
__A = PreTrainedTokenizerFast(tokenizer_file=_A )
__A = len(_A )
tokenizer.save_pretrained(_A )
# 2. Build the config
__A = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__A = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(f'`size` should be one of {possible_sizes}, got {size}.' )
__A = RwkvConfig(
vocab_size=_A , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_A )
# 3. Download model file then convert state_dict
__A = hf_hub_download(_A , _A )
__A = torch.load(_A , map_location='cpu' )
__A = convert_state_dict(_A )
# 4. Split in shards and save
__A , __A = shard_checkpoint(_A )
for shard_file, shard in shards.items():
torch.save(_A , os.path.join(_A , _A ) )
if index is not None:
__A = os.path.join(_A , _A )
# Save the index as well
with open(_A , 'w' , encoding='utf-8' ) as f:
__A = json.dumps(_A , indent=2 , sort_keys=_A ) + '\n'
f.write(_A )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you have still converted the model.' )
__A = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__A = torch.load(os.path.join(_A , _A ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_A , _A ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
__A = AutoModelForCausalLM.from_pretrained(_A )
model.push_to_hub(_A , max_shard_size='2GB' )
tokenizer.push_to_hub(_A )
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
a__ : List[Any] = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 161 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__A : Optional[int] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def __UpperCamelCase ( _A : Dict=None ) ->Dict:
"""simple docstring"""
if subparsers is not None:
lowerCamelCase_ =subparsers.add_parser("""tpu-config""" , description=_description )
else:
lowerCamelCase_ =argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
# Core arguments
lowerCamelCase_ =parser.add_argument_group(
"""Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
config_args.add_argument(
"""--config_file""" , type=_A , default=_A , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=_A , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=_A , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
lowerCamelCase_ =parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=_A , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
parser.set_defaults(func=_A )
return parser
def __UpperCamelCase ( _A : Tuple ) ->Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(_A ):
lowerCamelCase_ =load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowerCamelCase_ =defaults.command_file
if not args.command and defaults.commands is not None:
lowerCamelCase_ =defaults.commands
if not args.tpu_name:
lowerCamelCase_ =defaults.tpu_name
if not args.tpu_zone:
lowerCamelCase_ =defaults.tpu_zone
if args.accelerate_version == "dev":
lowerCamelCase_ ="""git+https://github.com/huggingface/accelerate.git"""
elif args.accelerate_version == "latest":
lowerCamelCase_ ="""accelerate -U"""
elif isinstance(parse(args.accelerate_version ) , _A ):
lowerCamelCase_ =f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
if args.command_file:
with open(args.command_file , """r""" ) as f:
lowerCamelCase_ =[f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , _A ):
lowerCamelCase_ =[line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowerCamelCase_ =["""cd /usr/share"""]
if args.install_accelerate:
new_cmd += [f'pip install {args.accelerate_version}']
new_cmd += args.command
lowerCamelCase_ ="""; """.join(_A )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowerCamelCase_ =["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(_A )}' )
return
subprocess.run(_A )
print("""Successfully setup pod.""" )
def __UpperCamelCase ( ) ->Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =tpu_command_parser()
lowerCamelCase_ =parser.parse_args()
tpu_command_launcher(_A )
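# Example invocation (TPU name and zone are placeholders):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --install_accelerate --debug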
| 154 | 0 |
from __future__ import annotations
from random import random
class __magic_name__ :
def __init__( self : Optional[Any] , snake_case__ : int | None = None ):
'''simple docstring'''
lowercase :List[str] = value
lowercase :Optional[int] = random()
lowercase :Node | None = None
lowercase :Node | None = None
def __repr__( self : List[str] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : str ):
'''simple docstring'''
lowercase :Dict = str(self.value ) + ''' '''
lowercase :Any = str(self.left or '''''' )
lowercase :Tuple = str(self.right or '''''' )
return value + left + right
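# Treap invariants maintained by split/merge below: an in-order walk is sorted
# by `value` (BST property), while `prior` -- a random float assigned at
# construction -- satisfies the min-heap property on every root-to-leaf path
# (merge keeps the node with the smaller prior on top), balancing the tree in
# expectation.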
def lowerCamelCase (a_ :Node | None , a_ :int) -> tuple[Node | None, Node | None]:
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowercase :Dict = split(root.left , a_)
return left, root
else:
lowercase :str = split(root.right , a_)
return root, right
def lowerCamelCase (a_ :Node | None , a_ :Node | None) -> Node | None:
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowercase :Union[str, Any] = merge(left.right , a_)
return left
else:
lowercase :int = merge(a_ , right.left)
return right
def lowerCamelCase (a_ :Node | None , a_ :int) -> Node | None:
lowercase :Any = Node(a_)
lowercase :List[str] = split(a_ , a_)
return merge(merge(a_ , a_) , a_)
def lowerCamelCase (a_ :Node | None , a_ :int) -> Node | None:
lowercase :Tuple = split(a_ , value - 1)
lowercase :Union[str, Any] = split(a_ , a_)
return merge(a_ , a_)
def lowerCamelCase (a_ :Node | None) -> None:
if not root: # None
return
else:
inorder(root.left)
print(root.value , end=''',''')
inorder(root.right)
def lowerCamelCase (a_ :Node | None , a_ :str) -> Node | None:
for arg in args.split():
if arg[0] == "+":
lowercase :int = insert(a_ , int(arg[1:]))
elif arg[0] == "-":
lowercase :Optional[int] = erase(a_ , int(arg[1:]))
else:
print('''Unknown command''')
return root
def lowerCamelCase () -> None:
lowercase :Optional[Any] = None
print(
'''enter numbers to create a tree, + value to add value into treap, '''
'''- value to erase all nodes with value. \'q\' to quit. ''')
lowercase :Optional[Any] = input()
while args != "q":
lowercase :List[Any] = interact_treap(a_ , a_)
print(a_)
lowercase :str = input()
    print('''goodbye!''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 351 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
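
# A toy sketch (not part of the conversion script) of the fused-qkv slicing
# convention that read_in_q_k_v and convert_state_dict implement: timm stores
# query/key/value as one (3 * hidden, hidden) matrix, sliced in q, k, v order.
# The hidden size below is arbitrary, purely for illustration.
def _qkv_split_demo() -> None:
    hidden = 4
    fused = torch.randn(3 * hidden, hidden)
    q = fused[:hidden, :]
    k = fused[hidden : hidden * 2, :]
    v = fused[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)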
| 172 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
_A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A : Union[str, Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
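
# Illustrative only: what rename_key above yields for two example timm-style
# checkpoint keys (example names, not read from a real checkpoint file).
def _rename_key_demo() -> None:
    assert rename_key("cls_token") == "embeddings.cls_token"
    assert rename_key("blocks.0.attn.proj.weight") == "encoder.layer.0.attention.output.dense.weight"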
| 142 |
def partition(m: int) -> int:
    """Count the partitions of m into positive integer parts."""
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input('Enter a number: ').strip())
            print(partition(n))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('Please pass a number.')
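
# Illustrative check of the recurrence above: 4 has five partitions
# (4, 3+1, 2+2, 2+1+1, 1+1+1+1), so partition(4) should return 5.
def _partition_demo() -> None:
    assert partition(4) == 5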
| 29 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
SCREAMING_SNAKE_CASE_ = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
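
# A minimal sketch of the decorator used above, separate from the GLUE script:
# `find_executable_batch_size` retries the wrapped function with a halved batch
# size whenever it sees an out-of-memory error. The training body below is a
# stand-in that fakes an OOM, not real training code.
def _find_executable_batch_size_demo() -> None:
    @find_executable_batch_size(starting_batch_size=128)
    def fake_train(batch_size):
        if batch_size > 16:  # pretend anything above 16 runs out of memory
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    assert fake_train() == 16  # 128 -> 64 -> 32 -> 16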
| 358 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
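
# A minimal usage sketch of the processor exercised above (assumes torch and
# vision extras are installed; the random 32x32 image is illustrative only).
def _dpt_image_processor_demo() -> None:
    image_processor = DPTImageProcessor(size={"height": 18, "width": 18})
    image = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    assert pixel_values.shape == (1, 3, 18, 18)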
| 193 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]

        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
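
# Illustrative only: the audio-length arithmetic the assertions above rely on.
# With the dummy UNet's sample_size of (32, 64) and Mel's default hop_length
# (512 at the time of writing -- an assumption worth double-checking against
# diffusers.Mel), a generated clip has (64 - 1) * 512 samples.
def _audio_length_demo() -> None:
    sample_size = (32, 64)
    hop_length = 512  # assumed Mel default
    assert (sample_size[1] - 1) * hop_length == 32256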
| 51 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/nllb-200-distilled-600M': (
            'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else 'eng_Latn'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
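
# A minimal usage sketch of the tokenizer above (downloads the NLLB
# sentencepiece model from the hub, so it needs network access). With the
# default non-legacy behaviour, encoded ids are [src_lang_code] + tokens + [eos].
def _nllb_tokenizer_demo() -> None:
    tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    ids = tokenizer("hello world")["input_ids"]
    assert ids[0] == tokenizer.lang_code_to_id["eng_Latn"]
    assert ids[-1] == tokenizer.eos_token_id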
| 192 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
pass | 361 |
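# A minimal standalone sketch (not the transformers implementation) of what
# the `truncate_before_pattern` behaviour exercised in the @slow test above
# amounts to: cut the decoded completion at the earliest match of any of the
# given regexes. The helper name `truncate_before` is hypothetical.
import re

def truncate_before(text, patterns):
    cut = len(text)
    for p in patterns:
        # MULTILINE so anchors like '^#' match at the start of any line
        m = re.search(p, text, flags=re.MULTILINE)
        if m:
            cut = min(cut, m.start())
    return text[:cut]

completion = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#"
patterns = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
assert truncate_before(completion, patterns) == "\nif len_a > len_b:\n    result = a\nelse:\n    result = b"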
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = CycleDiffusionPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"""negative_prompt""",
"""height""",
"""width""",
"""negative_prompt_embeds""",
}
lowercase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""} )
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase_ ( self : Any ):
torch.manual_seed(0 )
a : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
a : str = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=10_00 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , )
torch.manual_seed(0 )
a : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
a : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
a : List[str] = CLIPTextModel(__snake_case )
a : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a : Tuple = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase_ ( self : Optional[int] , __snake_case : Dict , __snake_case : Any=0 ):
a : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
a : Optional[Any] = image / 2 + 0.5
if str(__snake_case ).startswith('mps' ):
a : List[str] = torch.manual_seed(__snake_case )
else:
a : Union[str, Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a : List[Any] = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self : Optional[int] ):
a : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a : int = self.get_dummy_components()
a : str = CycleDiffusionPipeline(**__snake_case )
a : List[str] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
a : Dict = self.get_dummy_inputs(__snake_case )
a : Union[str, Any] = pipe(**__snake_case )
a : List[Any] = output.images
a : Optional[Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
a : Tuple = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowercase_ ( self : int ):
a : List[Any] = self.get_dummy_components()
for name, module in components.items():
if hasattr(__snake_case , 'half' ):
a : Any = module.half()
a : Tuple = CycleDiffusionPipeline(**__snake_case )
a : Any = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
a : str = self.get_dummy_inputs(__snake_case )
a : int = pipe(**__snake_case )
a : Optional[int] = output.images
a : Tuple = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
a : int = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowercase_ ( self : List[Any] ):
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def lowercase_ ( self : Dict ):
return super().test_inference_batch_single_identical()
@skip_mps
def lowercase_ ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowercase_ ( self : Dict ):
return super().test_save_load_optional_components()
@skip_mps
def lowercase_ ( self : List[Any] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
def lowercase_ ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[int] ):
a : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
a : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
a : List[str] = init_image.resize((5_12, 5_12) )
a : Dict = 'CompVis/stable-diffusion-v1-4'
a : List[str] = DDIMScheduler.from_pretrained(__snake_case , subfolder='scheduler' )
a : Any = CycleDiffusionPipeline.from_pretrained(
__snake_case , scheduler=__snake_case , safety_checker=__snake_case , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
a : Union[str, Any] = 'A black colored car'
a : Optional[Any] = 'A blue colored car'
a : int = torch.manual_seed(0 )
a : Optional[Any] = pipe(
prompt=__snake_case , source_prompt=__snake_case , image=__snake_case , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__snake_case , output_type='np' , )
a : Dict = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def lowercase_ ( self : int ):
a : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
a : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
a : str = init_image.resize((5_12, 5_12) )
a : Optional[int] = 'CompVis/stable-diffusion-v1-4'
a : Union[str, Any] = DDIMScheduler.from_pretrained(__snake_case , subfolder='scheduler' )
a : str = CycleDiffusionPipeline.from_pretrained(__snake_case , scheduler=__snake_case , safety_checker=__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
a : Tuple = 'A black colored car'
a : Tuple = 'A blue colored car'
a : List[str] = torch.manual_seed(0 )
a : str = pipe(
prompt=__snake_case , source_prompt=__snake_case , image=__snake_case , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__snake_case , output_type='np' , )
a : Tuple = output.images
assert np.abs(image - expected_image ).max() < 2e-2 | 96 | 0 |
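# The get_dummy_inputs helper above branches on the device string because a
# device-local torch.Generator is not available on MPS, where the tests fall
# back to seeding the global generator. A minimal sketch of that pattern,
# assuming only the public torch API (the helper name is hypothetical):
import torch

def seeded_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global CPU generator
    return torch.Generator(device=device).manual_seed(seed)

noise_a = torch.randn(2, 2, generator=seeded_generator("cpu", seed=0))
noise_b = torch.randn(2, 2, generator=seeded_generator("cpu", seed=0))
assert torch.equal(noise_a, noise_b)  # same seed -> identical draw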
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : str = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
UpperCAmelCase : List[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = {}
with open(a__ , """r""" ) as file:
for line_number, line in enumerate(a__ ):
__UpperCAmelCase : List[Any] = line.strip()
if line:
__UpperCAmelCase : Dict = line.split()
__UpperCAmelCase : List[Any] = line_number
__UpperCAmelCase : int = words[0]
__UpperCAmelCase : str = value
return result
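# For reference, the reader above maps each line number to the first
# whitespace-separated token on that line, which is the id2label format the
# sequence-classification branch expects. A self-contained check (the label
# names below are made up for illustration):
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as _f:
    _f.write("angry\nhappy\nneutral\n")
    _path = _f.name

_idalabel = {}
with open(_path) as _fh:
    for _line_number, _line in enumerate(_fh):
        _line = _line.strip()
        if _line:
            _idalabel[_line_number] = _line.split()[0]

assert _idalabel == {0: "angry", 1: "happy", 2: "neutral"}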
def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> int:
'''simple docstring'''
for attribute in key.split(""".""" ):
__UpperCAmelCase : Optional[int] = getattr(a__ , a__ )
__UpperCAmelCase : Union[str, Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(a__ ):
__UpperCAmelCase : Optional[int] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCAmelCase : Union[str, Any] = """param"""
if weight_type is not None and weight_type != "param":
__UpperCAmelCase : Tuple = getattr(a__ , a__ ).shape
elif weight_type is not None and weight_type == "param":
__UpperCAmelCase : Optional[Any] = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__UpperCAmelCase : Optional[Any] = getattr(a__ , a__ )
__UpperCAmelCase : Any = shape_pointer.shape
# let's reduce dimension
__UpperCAmelCase : Optional[Any] = value[0]
else:
__UpperCAmelCase : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__UpperCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__UpperCAmelCase : Any = value
elif weight_type == "weight_v":
__UpperCAmelCase : List[str] = value
elif weight_type == "bias":
__UpperCAmelCase : Optional[int] = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__UpperCAmelCase : str = getattr(a__ , a__ )
__UpperCAmelCase : int = value
else:
__UpperCAmelCase : Dict = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
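# The setter above resolves a dotted key by walking repeated getattr calls.
# A minimal standalone sketch of that traversal (the module layout below is
# illustrative):
from torch import nn

def resolve(root, dotted_key):
    obj = root
    for attribute in dotted_key.split("."):
        obj = getattr(obj, attribute)
    return obj

_toy = nn.Sequential(nn.Linear(2, 2))
# "0.weight" resolves to the weight Parameter of the first child module
assert resolve(_toy, "0.weight") is _toy[0].weight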
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : int = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(a__ ):
__UpperCAmelCase : List[str] = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__UpperCAmelCase : Union[str, Any] = """param"""
if weight_type is not None and weight_type != "param":
__UpperCAmelCase : List[str] = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__UpperCAmelCase : str = """.""".join([key, hf_param_name] )
else:
__UpperCAmelCase : List[Any] = key
__UpperCAmelCase : str = value if """lm_head""" in full_key else value[0]
UpperCAmelCase : List[Any] = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = False
for key, mapped_key in MAPPING.items():
__UpperCAmelCase : Optional[Any] = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__UpperCAmelCase : Any = True
if "*" in mapped_key:
__UpperCAmelCase : int = name.split(a__ )[0].split(""".""" )[-2]
__UpperCAmelCase : Tuple = mapped_key.replace("""*""" , a__ )
if "weight_g" in name:
__UpperCAmelCase : Any = """weight_g"""
elif "weight_v" in name:
__UpperCAmelCase : Any = """weight_v"""
elif "bias" in name:
__UpperCAmelCase : Optional[Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCAmelCase : str = """weight"""
else:
__UpperCAmelCase : Any = None
if hf_dict is not None:
rename_dict(a__ , a__ , a__ , a__ , a__ )
else:
set_recursively(a__ , a__ , a__ , a__ , a__ )
return is_used
return is_used
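# The loader above expands wildcard entries such as
# 'encoder.layers.*.attention.k_proj' by splicing in the layer index parsed
# out of the fairseq parameter name. A standalone sketch of that string
# manipulation (the parameter name below is illustrative):
_name = "encoder.layers.3.self_attn.k_proj.weight"
_key = "self_attn.k_proj"
_mapped = "wav2vec2.encoder.layers.*.attention.k_proj"

_layer_index = _name.split(_key)[0].split(".")[-2]
assert _layer_index == "3"
assert _mapped.replace("*", _layer_index) == "wav2vec2.encoder.layers.3.attention.k_proj"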
def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = []
__UpperCAmelCase : Optional[Any] = fairseq_model.state_dict()
__UpperCAmelCase : Optional[int] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__UpperCAmelCase : Tuple = False
if "conv_layers" in name:
load_conv_layer(
a__ , a__ , a__ , a__ , hf_model.config.feat_extract_norm == """group""" , )
__UpperCAmelCase : List[str] = True
else:
__UpperCAmelCase : int = load_wavaveca_layer(a__ , a__ , a__ )
if not is_used:
unused_weights.append(a__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[str] = full_name.split("""conv_layers.""" )[-1]
__UpperCAmelCase : str = name.split(""".""" )
__UpperCAmelCase : Any = int(items[0] )
__UpperCAmelCase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__UpperCAmelCase : List[str] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__UpperCAmelCase : List[str] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__UpperCAmelCase : Optional[Any] = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__UpperCAmelCase : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(a__ )
@torch.no_grad()
def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Tuple=None , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : int=False ) -> Optional[Any]:
'''simple docstring'''
if config_path is not None:
__UpperCAmelCase : Dict = WavaVecaConfig.from_pretrained(a__ )
else:
__UpperCAmelCase : Tuple = WavaVecaConfig()
if is_seq_class:
__UpperCAmelCase : List[Any] = read_txt_into_dict(a__ )
__UpperCAmelCase : List[Any] = idalabel
__UpperCAmelCase : int = WavaVecaForSequenceClassification(a__ )
__UpperCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , )
feature_extractor.save_pretrained(a__ )
elif is_finetuned:
if dict_path:
__UpperCAmelCase : str = Dictionary.load(a__ )
            # important: change the bos & pad token ids since the CTC symbol is <pad>
            # and not <s> as in fairseq
__UpperCAmelCase : Tuple = target_dict.pad_index
__UpperCAmelCase : str = target_dict.bos_index
__UpperCAmelCase : List[str] = target_dict.eos_index
__UpperCAmelCase : Tuple = len(target_dict.symbols )
__UpperCAmelCase : List[Any] = os.path.join(a__ , """vocab.json""" )
if not os.path.isdir(a__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(a__ ) )
return
os.makedirs(a__ , exist_ok=a__ )
__UpperCAmelCase : Any = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[str] = 1
with open(a__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(a__ , a__ )
__UpperCAmelCase : Tuple = WavaVecaCTCTokenizer(
a__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=a__ , )
__UpperCAmelCase : Optional[int] = True if config.feat_extract_norm == """layer""" else False
__UpperCAmelCase : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , )
__UpperCAmelCase : List[str] = WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ )
processor.save_pretrained(a__ )
__UpperCAmelCase : int = WavaVecaForCTC(a__ )
else:
__UpperCAmelCase : Union[str, Any] = WavaVecaForPreTraining(a__ )
if is_finetuned or is_seq_class:
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__UpperCAmelCase : List[str] = argparse.Namespace(task="""audio_pretraining""" )
__UpperCAmelCase : Union[str, Any] = fairseq.tasks.setup_task(a__ )
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=a__ )
__UpperCAmelCase : Union[str, Any] = model[0].eval()
recursively_load_weights(a__ , a__ , not is_finetuned )
hf_wavavec.save_pretrained(a__ )
if __name__ == "__main__":
UpperCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
UpperCAmelCase : Optional[Any] = parser.parse_args()
UpperCAmelCase : Union[str, Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 115 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_A = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 122 | 0 |
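# The block above follows the transformers lazy-import convention: heavy
# submodules are only imported when one of their attributes is first
# accessed. A minimal sketch of the same idea using module-level __getattr__
# (PEP 562), independent of the _LazyModule helper; the structure below is
# illustrative:
import importlib

_lazy_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    for _module_name, _attrs in _lazy_structure.items():
        if name in _attrs:
            return getattr(importlib.import_module(_module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")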
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCamelCase_:
'''simple docstring'''
def snake_case__ ( self , lowerCamelCase__ ):
raise NotImplementedError()
def snake_case__ ( self ):
raise NotImplementedError()
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ = False , **lowerCamelCase__ ):
_lowerCamelCase = tokenizer
_lowerCamelCase = skip_prompt
_lowerCamelCase = decode_kwargs
# variables used in the streaming process
_lowerCamelCase = []
_lowerCamelCase = 0
_lowerCamelCase = True
def snake_case__ ( self , lowerCamelCase__ ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError('''TextStreamer only supports batch size 1''' )
elif len(value.shape ) > 1:
_lowerCamelCase = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
_lowerCamelCase = False
return
        # Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
_lowerCamelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith('''\n''' ):
_lowerCamelCase = text[self.print_len :]
_lowerCamelCase = []
_lowerCamelCase = 0
# If the last token is a CJK character, we print the characters.
elif len(lowerCamelCase__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
_lowerCamelCase = text[self.print_len :]
self.print_len += len(lowerCamelCase__ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
_lowerCamelCase = text[self.print_len : text.rfind(''' ''' ) + 1]
self.print_len += len(lowerCamelCase__ )
self.on_finalized_text(lowerCamelCase__ )
def snake_case__ ( self ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
_lowerCamelCase = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
_lowerCamelCase = text[self.print_len :]
_lowerCamelCase = []
_lowerCamelCase = 0
else:
_lowerCamelCase = ''''''
_lowerCamelCase = True
self.on_finalized_text(lowerCamelCase__ , stream_end=lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = False ):
print(lowerCamelCase__ , flush=lowerCamelCase__ , end='''''' if not stream_end else None )
def snake_case__ ( self , lowerCamelCase__ ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
        # as are Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
if (
            (cp >= 0x4E00 and cp <= 0x9FFF)  # CJK Unified Ideographs
            or (cp >= 0x3400 and cp <= 0x4DBF)  # CJK Unified Ideographs Extension A
            or (cp >= 0x20000 and cp <= 0x2A6DF)  # CJK Unified Ideographs Extension B
            or (cp >= 0x2A700 and cp <= 0x2B73F)  # CJK Unified Ideographs Extension C
            or (cp >= 0x2B740 and cp <= 0x2B81F)  # CJK Unified Ideographs Extension D
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  # CJK Unified Ideographs Extension E
            or (cp >= 0xF900 and cp <= 0xFAFF)  # CJK Compatibility Ideographs
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  # CJK Compatibility Ideographs Supplement
        ):
return True
return False
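# Quick sanity check of the range test above: CJK Unified Ideographs are in
# the block, while Hangul and Hiragana fall outside it, which is exactly the
# behaviour the comment describes (the streamer flushes ideographs char by
# char but buffers other scripts until a space). Standalone sketch:
def _in_cjk_block(cp):
    return (0x4E00 <= cp <= 0x9FFF) or (0x3400 <= cp <= 0x4DBF) or (0xF900 <= cp <= 0xFAFF)

assert _in_cjk_block(ord("中"))      # CJK ideograph
assert not _in_cjk_block(ord("한"))  # Hangul syllable (U+AC00..U+D7AF)
assert not _in_cjk_block(ord("あ"))  # Hiragana (U+3040..U+309F)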
class lowerCamelCase_( A__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__ = False , lowerCamelCase__ = None , **lowerCamelCase__ ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
_lowerCamelCase = Queue()
_lowerCamelCase = None
_lowerCamelCase = timeout
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ = False ):
self.text_queue.put(lowerCamelCase__ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ):
return self
def snake_case__ ( self ):
_lowerCamelCase = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
| 366 |
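# Typical consumption pattern for the iterator streamer defined above:
# generation runs in a background thread while the main thread drains the
# queue. This mirrors the usage documented in transformers; the checkpoint
# name and generation kwargs are placeholders.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
streamer = TextIteratorStreamer(tok)

thread = Thread(target=model.generate, kwargs=dict(inputs, streamer=streamer, max_new_tokens=20))
thread.start()
generated_text = "".join(chunk for chunk in streamer)
thread.join()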
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=3_2 , lowerCamelCase__=3 , lowerCamelCase__=1_0 , lowerCamelCase__=[1_0, 2_0, 3_0, 4_0] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = num_channels
_lowerCamelCase = embeddings_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = depths
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_act
_lowerCamelCase = num_labels
_lowerCamelCase = scope
_lowerCamelCase = len(lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = self.get_config()
return config, pixel_values
def snake_case__ ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = FlaxRegNetModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = FlaxRegNetForImageClassification(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowercase__ : List[Any] = False
lowercase__ : Tuple = False
lowercase__ : Union[str, Any] = False
def snake_case__ ( self ):
_lowerCamelCase = FlaxRegNetModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def snake_case__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self ):
return
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            # check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , **lowerCamelCase__ ):
return model(pixel_values=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_( ) -> Optional[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
_lowerCamelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''np''' )
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 73 | 0 |
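# The JIT test above compares jitted and eager outputs for shape equality.
# A self-contained sketch of the same pattern with a toy function (no model
# weights needed):
import jax
import jax.numpy as jnp

@jax.jit
def _f(x):
    return jnp.tanh(x) * 2.0

_x = jnp.arange(4.0)
with jax.disable_jit():
    _eager = _f(_x)
_jitted = _f(_x)
assert _jitted.shape == _eager.shape
assert jnp.allclose(_jitted, _eager)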
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase : Optional[int] = 8
# DPR tok
lowerCAmelCase : Dict = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase : Dict = os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowerCAmelCase : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowerCAmelCase : Optional[int] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCAmelCase : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase : str = {"unk_token": "<unk>"}
lowerCAmelCase : int = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
lowerCAmelCase : int = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase : Dict = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def lowercase__ ( self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def lowercase__ ( self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def lowercase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = os.path.join(self.tmpdirname , "rag_tokenizer" )
lowerCAmelCase : List[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
lowerCAmelCase : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(snake_case__ )
rag_tokenizer.save_pretrained(snake_case__ )
lowerCAmelCase : List[str] = RagTokenizer.from_pretrained(snake_case__ , config=snake_case__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , snake_case__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , snake_case__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = RagTokenizer.from_pretrained("facebook/rag-token-nq" )
lowerCAmelCase : Dict = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
lowerCAmelCase : Union[str, Any] = tokenizer(snake_case__ )
self.assertIsNotNone(snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" )
lowerCAmelCase : List[str] = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
lowerCAmelCase : str = tokenizer(snake_case__ )
self.assertIsNotNone(snake_case__ )
 | 108 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
_a : Optional[int]= False
_a : int= False
def __UpperCAmelCase ( UpperCAmelCase_ : Namespace ) -> Optional[Any]:
'''simple docstring'''
return TrainCommand(UpperCAmelCase_ )
class UpperCamelCase ( lowercase ):
@staticmethod
def _lowercase (_A : ArgumentParser) -> Any:
__snake_case : Any = parser.add_parser('train' , help='CLI tool to train a model on a task.')
train_parser.add_argument(
'--train_data' , type=_A , required=_A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=_A , default=0 , help='Column of the dataset csv file with example labels.')
train_parser.add_argument(
'--column_text' , type=_A , default=1 , help='Column of the dataset csv file with example texts.')
train_parser.add_argument(
'--column_id' , type=_A , default=2 , help='Column of the dataset csv file with example ids.')
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).')
train_parser.add_argument('--validation_data' , type=_A , default='' , help='path to validation dataset.')
train_parser.add_argument(
'--validation_split' , type=_A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=_A , default='./' , help='path to saved the trained model.')
train_parser.add_argument(
'--task' , type=_A , default='text_classification' , help='Task to train the model on.')
train_parser.add_argument(
'--model' , type=_A , default='bert-base-uncased' , help='Model\'s name or path to stored model.')
train_parser.add_argument('--train_batch_size' , type=_A , default=32 , help='Batch size for training.')
train_parser.add_argument('--valid_batch_size' , type=_A , default=64 , help='Batch size for validation.')
train_parser.add_argument('--learning_rate' , type=_A , default=3E-5 , help='Learning rate.')
train_parser.add_argument('--adam_epsilon' , type=_A , default=1E-08 , help='Epsilon for Adam optimizer.')
train_parser.set_defaults(func=_A)
def __init__(self : int , _A : Namespace) -> Tuple:
__snake_case : Optional[int] = logging.get_logger('transformers-cli/training')
__snake_case : Optional[int] = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=_A)
__snake_case : List[Any] = args.output
__snake_case : Any = args.column_label
__snake_case : str = args.column_text
__snake_case : Any = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
__snake_case : List[str] = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
__snake_case : List[Any] = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case : List[str] = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
__snake_case : Dict = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case : List[str] = args.validation_split
__snake_case : str = args.train_batch_size
__snake_case : Any = args.valid_batch_size
__snake_case : Union[str, Any] = args.learning_rate
__snake_case : str = args.adam_epsilon
def _lowercase (self : List[str]) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _lowercase (self : str) -> int:
raise NotImplementedError
def _lowercase (self : Union[str, Any]) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
| 172 | 0 |
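# The command above plugs into the CLI by attaching a factory with
# set_defaults(func=...), so the top-level parser can dispatch on the parsed
# namespace. A minimal standalone sketch of that dispatch pattern (the
# command name and arguments below are illustrative):
from argparse import ArgumentParser, Namespace

def _train_factory(args: Namespace):
    return f"training on {args.train_data}"

_parser = ArgumentParser("cli")
_sub = _parser.add_subparsers()
_train = _sub.add_parser("train")
_train.add_argument("--train_data", type=str, required=True)
_train.set_defaults(func=_train_factory)

_args = _parser.parse_args(["train", "--train_data", "data.csv"])
assert _args.func(_args) == "training on data.csv"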
from __future__ import annotations
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
    # 1) Construct the failure array for the pattern
    _SCREAMING_SNAKE_CASE : str = get_failure_array(__lowerCamelCase )
# 2) Step through text searching for pattern
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = 0, 0 # index into text, pattern
while i < len(__lowerCamelCase ):
if pattern[j] == text[i]:
if j == (len(__lowerCamelCase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
_SCREAMING_SNAKE_CASE : Union[str, Any] = failure[j - 1]
continue
i += 1
return False
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = [0]
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0
_SCREAMING_SNAKE_CASE : int = 1
while j < len(__lowerCamelCase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
_SCREAMING_SNAKE_CASE : str = failure[i - 1]
continue
j += 1
failure.append(__lowerCamelCase )
return failure
if __name__ == "__main__":
# Test 1)
UpperCamelCase__ ='abc1abc12'
UpperCamelCase__ ='alskfjaldsabc1abc1abc12k23adsfabcabc'
UpperCamelCase__ ='alskfjaldsk23adsfabcabc'
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
UpperCamelCase__ ='ABABX'
UpperCamelCase__ ='ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
UpperCamelCase__ ='AAAB'
UpperCamelCase__ ='ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
UpperCamelCase__ ='abcdabcy'
UpperCamelCase__ ='abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
UpperCamelCase__ ='aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2] | 325 |
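# Worked example of how the failure array drives the search above (uses the
# functions defined in this module): for 'ABABX' the array is [0, 0, 1, 2, 0],
# so after mismatching on 'X' the scan resumes from index failure[3] = 2
# instead of restarting the pattern from 0.
assert get_failure_array('ABABX') == [0, 0, 1, 2, 0]
assert kmp('ABABX', 'ABABABABX')  # the 'ABAB' overlap is reused, not rescanned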
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCamelCase = 1_2_8 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 2000.0 , __lowerCamelCase = 7_6_8 , __lowerCamelCase = 1_2 , __lowerCamelCase = 1_2 , __lowerCamelCase = 6_4 , __lowerCamelCase = 2_0_4_8 , __lowerCamelCase = 0.1 , ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.Sequential(
nn.Linear(__lowerCamelCase , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__lowerCamelCase ) , nn.SiLU() , )
_SCREAMING_SNAKE_CASE : str = nn.Embedding(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
for lyr_num in range(__lowerCamelCase ):
# FiLM conditional T5 decoder
_SCREAMING_SNAKE_CASE : Optional[int] = DecoderLayer(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
self.decoders.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = nn.Dropout(p=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_SCREAMING_SNAKE_CASE : Tuple = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_SCREAMING_SNAKE_CASE : str = self.conditioning_emb(__lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_SCREAMING_SNAKE_CASE : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_SCREAMING_SNAKE_CASE : Optional[int] = torch.broadcast_to(
torch.arange(__lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.position_encoding(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.continuous_inputs_projection(__lowerCamelCase )
inputs += position_encodings
_SCREAMING_SNAKE_CASE : Any = self.dropout(__lowerCamelCase )
# decoder: No padding present.
_SCREAMING_SNAKE_CASE : Any = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_SCREAMING_SNAKE_CASE : List[str] = [(x, self.encoder_decoder_mask(__lowerCamelCase , __lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_SCREAMING_SNAKE_CASE : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_SCREAMING_SNAKE_CASE : Optional[Any] = lyr(
__lowerCamelCase , conditioning_emb=__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , encoder_attention_mask=__lowerCamelCase , )[0]
_SCREAMING_SNAKE_CASE : int = self.decoder_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.post_dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = self.spec_out(__lowerCamelCase )
return spec_out
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> Dict:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__lowerCamelCase , d_kv=__lowerCamelCase , num_heads=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase , layer_norm_epsilon=__lowerCamelCase ) )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE : str = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_SCREAMING_SNAKE_CASE : Tuple = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
_SCREAMING_SNAKE_CASE : Optional[Any] = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> Union[str, Any]:
# pre_self_attention_layer_norm
_SCREAMING_SNAKE_CASE : int = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Any = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
_SCREAMING_SNAKE_CASE : Optional[int] = self.attention(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]:
super().__init__()
_SCREAMING_SNAKE_CASE : Optional[Any] = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None , __lowerCamelCase=None , ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Tuple = self.layer_norm(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Tuple = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = nn.Dropout(__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase=None ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.film(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = self.DenseReluDense(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = NewGELUActivation()
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = self.act(self.wi_a(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : Dict = self.wi_a(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = hidden_gelu * hidden_linear
_SCREAMING_SNAKE_CASE : Optional[int] = self.dropout(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = self.wo(__lowerCamelCase )
return hidden_states
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1E-6 ) -> int:
super().__init__()
_SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(__lowerCamelCase ) )
_SCREAMING_SNAKE_CASE : str = eps
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[Any]:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
        _SCREAMING_SNAKE_CASE : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=True )
_SCREAMING_SNAKE_CASE : List[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_SCREAMING_SNAKE_CASE : str = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
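# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The RMSNorm above with plain names, assuming only `torch`: scale by the
# reciprocal root-mean-square (variance accumulated in fp32) and a learned
# weight; unlike LayerNorm there is no mean subtraction and no bias.
import torch
from torch import nn
class SimpleRMSNorm(nn.Module):
    def __init__(self, hidden_size: int, eps: float = 1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.eps = eps
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
        x = x * torch.rsqrt(variance + self.eps)
        return self.weight * x.to(self.weight.dtype)
_y = SimpleRMSNorm(8)(torch.randn(2, 8))
assert torch.allclose(_y.pow(2).mean(-1).sqrt(), torch.ones(2), atol=1e-5)  # unit-RMS rows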
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def UpperCamelCase_ ( self , __lowerCamelCase ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(__lowerCamelCase , 3.0 )) ))
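# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The tanh-based GELU approximation above (0.044715 is the standard constant),
# checked against torch's exact erf-based GELU; the two agree to well under 1e-2.
import math
import torch
def gelu_tanh(x: torch.Tensor) -> torch.Tensor:
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
_x = torch.linspace(-4.0, 4.0, steps=101)
assert (gelu_tanh(_x) - torch.nn.functional.gelu(_x)).abs().max() < 1e-2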
class lowerCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
super().__init__()
_SCREAMING_SNAKE_CASE : Any = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE : List[Any] = self.scale_bias(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = torch.chunk(__lowerCamelCase , 2 , -1 )
_SCREAMING_SNAKE_CASE : Optional[int] = x * (1 + scale) + shift
return x | 325 | 1 |
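# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The FiLM layer above with plain names: one linear projection of the
# conditioning embedding is split into per-channel (scale, shift) and applied
# as x * (1 + scale) + shift. `bias=False` is an assumption (the flag is not
# spelled out above).
import torch
from torch import nn
class SimpleFiLM(nn.Module):
    def __init__(self, cond_features: int, out_features: int):
        super().__init__()
        self.scale_bias = nn.Linear(cond_features, out_features * 2, bias=False)
    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = torch.chunk(self.scale_bias(cond), 2, dim=-1)
        return x * (1 + scale) + shift
assert SimpleFiLM(16, 8)(torch.randn(2, 8), torch.randn(2, 16)).shape == (2, 8)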
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase: int = logging.get_logger(__name__)
_UpperCamelCase: Dict = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class a__ ( UpperCamelCase__ ):
_lowerCamelCase = 'gpt_neox_japanese'
def __init__( self : str, lowerCAmelCase : List[Any]=32000, lowerCAmelCase : List[Any]=2560, lowerCAmelCase : str=32, lowerCAmelCase : List[Any]=32, lowerCAmelCase : List[str]=4, lowerCAmelCase : Dict="gelu", lowerCAmelCase : Dict=1.00, lowerCAmelCase : Optional[int]=10000, lowerCAmelCase : Dict=2048, lowerCAmelCase : List[str]=0.02, lowerCAmelCase : List[str]=1e-5, lowerCAmelCase : List[str]=True, lowerCAmelCase : List[str]=31996, lowerCAmelCase : Tuple=31999, lowerCAmelCase : List[Any]=0.1, lowerCAmelCase : int=0.0, **lowerCAmelCase : str, ) -> Union[str, Any]:
super().__init__(bos_token_id=__lowerCamelCase, eos_token_id=__lowerCamelCase, **__lowerCamelCase )
lowercase : Optional[Any] = vocab_size
lowercase : List[str] = max_position_embeddings
lowercase : Dict = hidden_size
lowercase : List[Any] = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : Union[str, Any] = intermediate_multiple_size
lowercase : str = hidden_act
lowercase : Optional[int] = rotary_pct
lowercase : Optional[Any] = rotary_emb_base
lowercase : Optional[int] = initializer_range
lowercase : int = layer_norm_eps
lowercase : Dict = use_cache
lowercase : str = attention_dropout
lowercase : Optional[int] = hidden_dropout
| 255 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = hf_hub_download(
repo_id='''nateraw/video-demo''',filename='''archery.mp4''',repo_type='''dataset''' )
A__ = VideoClassificationPipeline(model=__lowerCamelCase,image_processor=__lowerCamelCase,top_k=2 )
A__ = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
for example in examples:
A__ = video_classifier(__lowerCamelCase )
self.assertEqual(
__lowerCamelCase,[
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
],)
@require_torch
def UpperCamelCase ( self ):
A__ = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
A__ = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10},crop_size={'''height''': 10, '''width''': 10} )
A__ = pipeline(
'''video-classification''',model=__lowerCamelCase,feature_extractor=__lowerCamelCase,frame_sampling_rate=4 )
A__ = hf_hub_download(repo_id='''nateraw/video-demo''',filename='''archery.mp4''',repo_type='''dataset''' )
A__ = video_classifier(__lowerCamelCase,top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],)
A__ = video_classifier(
[
video_file_path,
video_file_path,
],top_k=2,)
self.assertEqual(
nested_simplify(__lowerCamelCase,decimals=4 ),[
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
],)
@require_tf
def UpperCamelCase ( self ):
pass
| 193 | 0 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =CustomTokenizer
pass
| 257 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =42
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
UpperCAmelCase_ =42
UpperCAmelCase_ =(16, 32, 96, 256)
UpperCAmelCase_ =jnp.floataa
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = []
for i in range(len(self.block_out_channels ) - 1 ):
SCREAMING_SNAKE_CASE_ = self.block_out_channels[i]
SCREAMING_SNAKE_CASE_ = self.block_out_channels[i + 1]
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
SCREAMING_SNAKE_CASE_ = blocks
SCREAMING_SNAKE_CASE_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.conv_in(_A )
SCREAMING_SNAKE_CASE_ = nn.silu(_A )
for block in self.blocks:
SCREAMING_SNAKE_CASE_ = block(_A )
SCREAMING_SNAKE_CASE_ = nn.silu(_A )
SCREAMING_SNAKE_CASE_ = self.conv_out(_A )
return embedding
@flax_register_to_config
class UpperCamelCase__ ( nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =32
UpperCAmelCase_ =4
UpperCAmelCase_ =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase_ =False
UpperCAmelCase_ =(320, 640, 1_280, 1_280)
UpperCAmelCase_ =2
UpperCAmelCase_ =8
UpperCAmelCase_ =None
UpperCAmelCase_ =1_280
UpperCAmelCase_ =0.0
UpperCAmelCase_ =False
UpperCAmelCase_ =jnp.floataa
UpperCAmelCase_ =True
UpperCAmelCase_ =0
UpperCAmelCase_ ="rgb"
UpperCAmelCase_ =(16, 32, 96, 256)
def _UpperCamelCase ( self , _A ) -> FrozenDict:
# init input tensors
SCREAMING_SNAKE_CASE_ = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE_ = jnp.zeros(_A , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = (1, 3, self.sample_size * 8, self.sample_size * 8)
SCREAMING_SNAKE_CASE_ = jnp.zeros(_A , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = jax.random.split(_A )
SCREAMING_SNAKE_CASE_ = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_A , _A , _A , _A , _A )["params"]
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE_ = self.block_out_channels
SCREAMING_SNAKE_CASE_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE_ = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE_ = FlaxTimestepEmbedding(_A , dtype=self.dtype )
SCREAMING_SNAKE_CASE_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
SCREAMING_SNAKE_CASE_ = self.only_cross_attention
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = block_out_channels[0]
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE_ = output_channel
SCREAMING_SNAKE_CASE_ = block_out_channels[i]
SCREAMING_SNAKE_CASE_ = i == len(_A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE_ = FlaxCrossAttnDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE_ = FlaxDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_A )
for _ in range(self.layers_per_block ):
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
if not is_final_block:
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
SCREAMING_SNAKE_CASE_ = down_blocks
SCREAMING_SNAKE_CASE_ = controlnet_down_blocks
# mid
SCREAMING_SNAKE_CASE_ = block_out_channels[-1]
SCREAMING_SNAKE_CASE_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=_A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
SCREAMING_SNAKE_CASE_ = nn.Conv(
_A , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A , _A , _A , _A , _A = 1.0 , _A = True , _A = False , ) -> Union[FlaxControlNetOutput, Tuple]:
SCREAMING_SNAKE_CASE_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
SCREAMING_SNAKE_CASE_ = jnp.flip(_A , axis=1 )
# 1. time
if not isinstance(_A , jnp.ndarray ):
SCREAMING_SNAKE_CASE_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE_ = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ = jnp.expand_dims(_A , 0 )
SCREAMING_SNAKE_CASE_ = self.time_proj(_A )
SCREAMING_SNAKE_CASE_ = self.time_embedding(_A )
# 2. pre-process
SCREAMING_SNAKE_CASE_ = jnp.transpose(_A , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ = self.conv_in(_A )
SCREAMING_SNAKE_CASE_ = jnp.transpose(_A , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE_ = self.controlnet_cond_embedding(_A )
sample += controlnet_cond
# 3. down
SCREAMING_SNAKE_CASE_ = (sample,)
for down_block in self.down_blocks:
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = down_block(_A , _A , _A , deterministic=not train )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = down_block(_A , _A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
SCREAMING_SNAKE_CASE_ = self.mid_block(_A , _A , _A , deterministic=not train )
# 5. contronet blocks
SCREAMING_SNAKE_CASE_ = ()
for down_block_res_sample, controlnet_block in zip(_A , self.controlnet_down_blocks ):
SCREAMING_SNAKE_CASE_ = controlnet_block(_A )
controlnet_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE_ = controlnet_down_block_res_samples
SCREAMING_SNAKE_CASE_ = self.controlnet_mid_block(_A )
# 6. scaling
SCREAMING_SNAKE_CASE_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_A , mid_block_res_sample=_A )
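# --- Illustrative sketch (added for clarity, not part of the original file) ---
# How an `init_weights`-style method like the one above is used in flax.linen:
# `init` takes a dict of named RNG streams ("params" for weight init, "dropout"
# for stochastic layers) plus dummy inputs and returns the parameter pytree.
# Shown on a tiny Dense module rather than the full ControlNet.
import jax
import jax.numpy as jnp
import flax.linen as nn
_params_rng, _dropout_rng = jax.random.split(jax.random.PRNGKey(0))
_variables = nn.Dense(features=4).init(
    {"params": _params_rng, "dropout": _dropout_rng}, jnp.ones((1, 3))
)
assert _variables["params"]["kernel"].shape == (3, 4)  # flax kernels are (in, out)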
| 257 | 1 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , "Tatoeba directory does not exist." )
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Dict = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__magic_name__ )
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Dict = self.resolver.write_model_card('''opus-mt-he-en''', dry_run=__magic_name__ )
assert mmeta["long_pair"] == "heb-eng"
| 201 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowercase__ = logging.getLogger()
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : List[Any] = os.path.join(lowercase__ , 'all_results.json' )
if os.path.exists(lowercase__ ):
with open(lowercase__ , 'r' ) as f:
_lowerCamelCase : List[Any] = json.load(lowercase__ )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def A_ ( self ):
import xla_spawn
_lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
_lowerCamelCase : List[Any] = F'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowercase , 'argv' , lowercase ):
_lowerCamelCase : Dict = time()
xla_spawn.main()
_lowerCamelCase : Any = time()
_lowerCamelCase : Optional[int] = get_results(lowercase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def A_ ( self ):
import xla_spawn
_lowerCamelCase : Tuple = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(lowercase , 'argv' , lowercase ):
xla_spawn.main() | 96 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def __snake_case ( SCREAMING_SNAKE_CASE__ : int ) -> str:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = SwinConfig()
_UpperCAmelCase : Dict = swin_name.split("_" )
_UpperCAmelCase : Tuple = name_split[1]
_UpperCAmelCase : Any = int(name_split[4] )
_UpperCAmelCase : Optional[int] = int(name_split[3][-1] )
if model_size == "tiny":
_UpperCAmelCase : Optional[int] = 96
_UpperCAmelCase : Dict = (2, 2, 6, 2)
_UpperCAmelCase : Optional[Any] = (3, 6, 12, 24)
elif model_size == "small":
_UpperCAmelCase : Any = 96
_UpperCAmelCase : List[str] = (2, 2, 18, 2)
_UpperCAmelCase : Any = (3, 6, 12, 24)
elif model_size == "base":
_UpperCAmelCase : Tuple = 128
_UpperCAmelCase : List[Any] = (2, 2, 18, 2)
_UpperCAmelCase : Tuple = (4, 8, 16, 32)
else:
_UpperCAmelCase : Any = 192
_UpperCAmelCase : Tuple = (2, 2, 18, 2)
_UpperCAmelCase : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
_UpperCAmelCase : str = 21_841
else:
_UpperCAmelCase : List[str] = 1_000
_UpperCAmelCase : List[str] = "huggingface/label-files"
_UpperCAmelCase : Union[str, Any] = "imagenet-1k-id2label.json"
_UpperCAmelCase : str = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : List[Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
_UpperCAmelCase : Tuple = idalabel
_UpperCAmelCase : int = {v: k for k, v in idalabel.items()}
_UpperCAmelCase : str = img_size
_UpperCAmelCase : str = num_classes
_UpperCAmelCase : Optional[int] = embed_dim
_UpperCAmelCase : Union[str, Any] = depths
_UpperCAmelCase : int = num_heads
_UpperCAmelCase : Optional[int] = window_size
return config
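# --- Illustrative sketch (added for clarity, not part of the original file) ---
# How the name-splitting above carves up a timm Swin checkpoint name: piece 1
# is the model size, the last character of piece 3 is the window size, and
# piece 4 is the input resolution.
_parts = "swin_tiny_patch4_window7_224".split("_")
assert _parts[1] == "tiny"        # -> embed_dim 96, depths (2, 2, 6, 2)
assert int(_parts[3][-1]) == 7    # window size
assert int(_parts[4]) == 224      # image size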
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if "patch_embed.proj" in name:
_UpperCAmelCase : Tuple = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
_UpperCAmelCase : Optional[Any] = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
_UpperCAmelCase : List[Any] = "encoder." + name
if "attn.proj" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_UpperCAmelCase : Dict = name.replace("attn" , "attention.self" )
if "norm1" in name:
_UpperCAmelCase : Dict = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_UpperCAmelCase : Dict = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_UpperCAmelCase : List[Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_UpperCAmelCase : str = name.replace("mlp.fc2" , "output.dense" )
if name == "norm.weight":
_UpperCAmelCase : str = "layernorm.weight"
if name == "norm.bias":
_UpperCAmelCase : Optional[int] = "layernorm.bias"
if "head" in name:
_UpperCAmelCase : Tuple = name.replace("head" , "classifier" )
else:
_UpperCAmelCase : List[Any] = "swin." + name
return name
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> List[str]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_UpperCAmelCase : Optional[int] = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "mask" in key:
continue
elif "qkv" in key:
_UpperCAmelCase : Optional[Any] = key.split("." )
_UpperCAmelCase : Dict = int(key_split[1] )
_UpperCAmelCase : List[Any] = int(key_split[3] )
_UpperCAmelCase : Union[str, Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCAmelCase : List[str] = val[:dim, :]
_UpperCAmelCase : Tuple = val[
dim : dim * 2, :
]
_UpperCAmelCase : int = val[-dim:, :]
else:
_UpperCAmelCase : int = val[
:dim
]
_UpperCAmelCase : str = val[
dim : dim * 2
]
_UpperCAmelCase : Optional[int] = val[
-dim:
]
else:
_UpperCAmelCase : List[str] = val
return orig_state_dict
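# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The qkv split above: timm fuses query/key/value into one matrix of shape
# (3 * dim, dim); HF Swin wants three (dim, dim) blocks, taken as consecutive
# row slices exactly like `val[:dim]`, `val[dim : dim * 2]`, `val[-dim:]`.
import torch
_dim = 4
_fused = torch.randn(3 * _dim, _dim)
_q, _k, _v = _fused[:_dim, :], _fused[_dim : _dim * 2, :], _fused[-_dim:, :]
assert torch.equal(torch.cat([_q, _k, _v], dim=0), _fused)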
def __snake_case ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ )
timm_model.eval()
_UpperCAmelCase : List[str] = get_swin_config(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = SwinForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
_UpperCAmelCase : Optional[Any] = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) )
_UpperCAmelCase : Any = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
_UpperCAmelCase : int = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" )
_UpperCAmelCase : Tuple = timm_model(inputs["pixel_values"] )
_UpperCAmelCase : int = model(**SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
print(f'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_lowerCAmelCase : Dict = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 202 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_lowerCAmelCase : Any = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
__SCREAMING_SNAKE_CASE : str
__SCREAMING_SNAKE_CASE : List[str]
__SCREAMING_SNAKE_CASE : Optional[List[str]]
@dataclass
class UpperCAmelCase_ :
__SCREAMING_SNAKE_CASE : List[int]
__SCREAMING_SNAKE_CASE : List[int]
__SCREAMING_SNAKE_CASE : Optional[List[int]] = None
__SCREAMING_SNAKE_CASE : Optional[List[int]] = None
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'train'
__SCREAMING_SNAKE_CASE : Tuple = 'dev'
__SCREAMING_SNAKE_CASE : Optional[int] = 'test'
class UpperCAmelCase_ :
@staticmethod
def snake_case_ ( A : Union[str, Any] , A : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def snake_case_ ( A : str ):
raise NotImplementedError
@staticmethod
def snake_case_ ( A : List[InputExample] , A : List[str] , A : int , A : PreTrainedTokenizer , A : Optional[int]=False , A : List[str]="[CLS]" , A : List[Any]=1 , A : str="[SEP]" , A : int=False , A : int=False , A : Any=0 , A : List[str]=0 , A : Dict=-1_0_0 , A : str=0 , A : Optional[Any]=True , ):
_UpperCAmelCase : Dict = {label: i for i, label in enumerate(A )}
_UpperCAmelCase : str = []
for ex_index, example in enumerate(A ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("Writing example %d of %d" , A , len(A ) )
_UpperCAmelCase : int = []
_UpperCAmelCase : List[str] = []
for word, label in zip(example.words , example.labels ):
_UpperCAmelCase : str = tokenizer.tokenize(A )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(A ) > 0:
tokens.extend(A )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(A ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_UpperCAmelCase : List[str] = tokenizer.num_special_tokens_to_add()
if len(A ) > max_seq_length - special_tokens_count:
_UpperCAmelCase : List[Any] = tokens[: (max_seq_length - special_tokens_count)]
_UpperCAmelCase : List[Any] = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_UpperCAmelCase : Dict = [sequence_a_segment_id] * len(A )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_UpperCAmelCase : str = [cls_token] + tokens
_UpperCAmelCase : Dict = [pad_token_label_id] + label_ids
_UpperCAmelCase : Any = [cls_token_segment_id] + segment_ids
_UpperCAmelCase : int = tokenizer.convert_tokens_to_ids(A )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_UpperCAmelCase : List[Any] = [1 if mask_padding_with_zero else 0] * len(A )
# Zero-pad up to the sequence length.
_UpperCAmelCase : List[str] = max_seq_length - len(A )
if pad_on_left:
_UpperCAmelCase : str = ([pad_token] * padding_length) + input_ids
_UpperCAmelCase : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_UpperCAmelCase : Any = ([pad_token_segment_id] * padding_length) + segment_ids
_UpperCAmelCase : Dict = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(A ) == max_seq_length
assert len(A ) == max_seq_length
assert len(A ) == max_seq_length
assert len(A ) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***" )
logger.info("guid: %s" , example.guid )
logger.info("tokens: %s" , " ".join([str(A ) for x in tokens] ) )
logger.info("input_ids: %s" , " ".join([str(A ) for x in input_ids] ) )
logger.info("input_mask: %s" , " ".join([str(A ) for x in input_mask] ) )
logger.info("segment_ids: %s" , " ".join([str(A ) for x in segment_ids] ) )
logger.info("label_ids: %s" , " ".join([str(A ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_UpperCAmelCase : Dict = None
features.append(
InputFeatures(
input_ids=A , attention_mask=A , token_type_ids=A , label_ids=A ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : List[InputFeatures]
__SCREAMING_SNAKE_CASE : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : Dict , A : TokenClassificationTask , A : str , A : PreTrainedTokenizer , A : List[str] , A : str , A : Optional[int] = None , A : List[str]=False , A : Split = Split.train , ):
# Load data features from cache or dataset file
_UpperCAmelCase : int = os.path.join(
A , "cached_{}_{}_{}".format(mode.value , tokenizer.__class__.__name__ , str(A ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCAmelCase : List[str] = cached_features_file + ".lock"
with FileLock(A ):
if os.path.exists(A ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
_UpperCAmelCase : Tuple = torch.load(A )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
_UpperCAmelCase : List[str] = token_classification_task.read_examples_from_file(A , A )
# TODO clean up all this to leverage built-in features of tokenizers
_UpperCAmelCase : List[Any] = token_classification_task.convert_examples_to_features(
A , A , A , A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'Saving features into cached file {cached_features_file}' )
torch.save(self.features , A )
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : List[str] , A : Optional[Any] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ :
__SCREAMING_SNAKE_CASE : List[InputFeatures]
__SCREAMING_SNAKE_CASE : int = -1_0_0
def __init__( self : Tuple , A : TokenClassificationTask , A : str , A : PreTrainedTokenizer , A : List[str] , A : str , A : Optional[int] = None , A : Optional[Any]=False , A : Split = Split.train , ):
_UpperCAmelCase : Union[str, Any] = token_classification_task.read_examples_from_file(A , A )
# TODO clean up all this to leverage built-in features of tokenizers
_UpperCAmelCase : List[str] = token_classification_task.convert_examples_to_features(
A , A , A , A , cls_token_at_end=bool(model_type in ["xlnet"] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["xlnet"] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=A , pad_on_left=bool(tokenizer.padding_side == "left" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_UpperCAmelCase : List[str] = tf.data.Dataset.from_generator(
A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , (
{"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_UpperCAmelCase : List[Any] = tf.data.Dataset.from_generator(
A , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , (
{
"input_ids": tf.TensorShape([None] ),
"attention_mask": tf.TensorShape([None] ),
"token_type_ids": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def snake_case_ ( self : str ):
_UpperCAmelCase : Dict = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[Any] ):
return len(self.features )
def __getitem__( self : int , A : int ):
return self.features[i]
| 202 | 1 |
def __UpperCamelCase ( _A : int ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
lowerCamelCase_ =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowerCamelCase_ =1
if upper_limit > 0:
lowerCamelCase_ =1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowerCamelCase__ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
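# --- Illustrative sketch (added for clarity, not part of the original file) ---
# Worked example of the recurrence above: C(0) = C(1) = 1 and
# C(i) = sum_{j=0}^{i-1} C(j) * C(i-j-1), e.g.
#   C(3) = C(0)C(2) + C(1)C(1) + C(2)C(0) = 2 + 1 + 2 = 5
# Assuming the DP function above is exposed as `catalan_numbers` (the name the
# __main__ block below already uses), the first six values are:
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]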
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
__A : List[str] = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 154 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = None ) -> str:
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
__lowerCamelCase : int = quote(lowerCamelCase__ )
return hfh.hf_hub_url(lowerCamelCase__ , lowerCamelCase__ , repo_type='dataset' , revision=lowerCamelCase__ )
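# --- Illustrative note (added for clarity, not part of the original file) ---
# Typical output of the helper above, shown for a hypothetical repo/path (the
# exact URL shape follows the Hub's usual "resolve" scheme and is an
# assumption, not pinned by this file):
#   repo_id="user/ds", path="data/train.json"
#   -> https://huggingface.co/datasets/user/ds/resolve/main/data/train.json
# The `quote` call is a compatibility shim: huggingface_hub < 0.11.0 did not
# url-encode the file path itself, so it is pre-encoded here for those versions.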
| 73 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=A_ )
class lowerCAmelCase__ ( A_ ):
'''simple docstring'''
lowerCAmelCase : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
lowerCAmelCase : ClassVar[Features] = Features({"text": Value("string" )} )
lowerCAmelCase : ClassVar[Features] = Features({"labels": ClassLabel} )
lowerCAmelCase : str = "text"
lowerCAmelCase : str = "labels"
def UpperCAmelCase ( self : Dict ,_snake_case : Tuple ) -> Optional[int]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] ,_snake_case ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
lowercase__ : int = copy.deepcopy(self )
lowercase__ : Any = self.label_schema.copy()
lowercase__ : Any = features[self.label_column]
lowercase__ : int = label_schema
return task_template
@property
def UpperCAmelCase ( self : Dict ) -> Dict[str, str]:
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
}
| 368 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = "maskformer"
lowerCAmelCase : Any = {"hidden_size": "mask_feature_size"}
lowerCAmelCase : Optional[int] = ["resnet", "swin"]
lowerCAmelCase : str = ["detr"]
def __init__( self : int ,_snake_case : int = 256 ,_snake_case : int = 256 ,_snake_case : float = 0.1 ,_snake_case : bool = False ,_snake_case : Optional[Dict] = None ,_snake_case : Optional[Dict] = None ,_snake_case : float = 0.02 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 1.0 ,_snake_case : float = 20.0 ,_snake_case : Optional[bool] = None ,**_snake_case : Optional[Any] ,) -> Dict:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase__ : Any = SwinConfig(
image_size=384 ,in_channels=3 ,patch_size=4 ,embed_dim=128 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ,)
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[str] = backbone_config.pop('''model_type''' )
lowercase__ : List[Any] = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(_snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase__ : Union[str, Any] = DetrConfig()
else:
# verify that the decoder is supported
lowercase__ : Tuple = (
decoder_config.pop('''model_type''' ) if isinstance(_snake_case ,_snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(_snake_case ,_snake_case ):
lowercase__ : Optional[int] = CONFIG_MAPPING[decoder_type]
lowercase__ : Optional[Any] = config_class.from_dict(_snake_case )
lowercase__ : List[Any] = backbone_config
lowercase__ : List[Any] = decoder_config
# main feature dimension for the model
lowercase__ : List[str] = fpn_feature_size
lowercase__ : int = mask_feature_size
# initializer
lowercase__ : str = init_std
lowercase__ : str = init_xavier_std
# Hungarian matcher && loss
lowercase__ : Optional[int] = cross_entropy_weight
lowercase__ : List[Any] = dice_weight
lowercase__ : List[str] = mask_weight
lowercase__ : str = use_auxiliary_loss
lowercase__ : Optional[int] = no_object_weight
lowercase__ : Optional[Any] = output_auxiliary_logits
lowercase__ : Optional[Any] = self.decoder_config.encoder_attention_heads
lowercase__ : Optional[Any] = self.decoder_config.num_hidden_layers
super().__init__(**_snake_case )
@classmethod
def UpperCAmelCase ( cls : Any ,_snake_case : PretrainedConfig ,_snake_case : PretrainedConfig ,**_snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return cls(
backbone_config=_snake_case ,decoder_config=_snake_case ,**_snake_case ,)
def UpperCAmelCase ( self : str ) -> Dict[str, any]:
"""simple docstring"""
lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : List[Any] = self.decoder_config.to_dict()
lowercase__ : List[str] = self.__class__.model_type
return output
| 302 | 0 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> Dict:
__lowercase = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE )
__lowercase = flatten_dict(SCREAMING_SNAKE_CASE )
return flax_params
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Any:
__lowercase = {}
__lowercase = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__lowercase = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__lowercase = '.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__lowercase = new_key.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__lowercase = new_key.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__lowercase = re.sub(R'layers_(\d+)' , R'layer.\1' , SCREAMING_SNAKE_CASE )
__lowercase = new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__lowercase = re.sub(R'layers_(\d+)' , R'layer.\1' , SCREAMING_SNAKE_CASE )
__lowercase = flax_dict[key]
__lowercase = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__lowercase = torch.from_numpy(converted_dict[key].T )
else:
__lowercase = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
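# --- Illustrative sketch (added for clarity, not part of the original file) ---
# Why the `.T` above: flax/t5x dense kernels are stored as (in_features,
# out_features), while torch.nn.Linear weights are (out_features, in_features),
# so each kernel is transposed during conversion (embeddings keep their layout).
import torch
import torch.nn.functional as F
_kernel = torch.randn(3, 5)               # flax layout: (in, out)
_weight = _kernel.T.contiguous()          # torch Linear layout: (out, in)
_x = torch.randn(2, 3)
assert torch.allclose(_x @ _kernel, F.linear(_x, _weight))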
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : Tuple=False ) -> Dict:
__lowercase = get_flax_param(SCREAMING_SNAKE_CASE )
if not use_large:
__lowercase = PixaStructVisionConfig()
__lowercase = PixaStructTextConfig()
else:
__lowercase = PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
__lowercase = PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
__lowercase = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE )
__lowercase = PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE )
__lowercase = rename_and_convert_flax_params(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
__lowercase = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__lowercase = PixaStructImageProcessor()
__lowercase = PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
if use_large:
__lowercase = 4096
__lowercase = True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
    parser.add_argument("""--is_vqa""", action="""store_true""", help="""Whether the model is a VQA model.""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 325 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = ["vqvae"]
def __init__( self : int , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Mel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , mel=_UpperCAmelCase , vqvae=_UpperCAmelCase )
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , _UpperCAmelCase ) else 10_00
@torch.no_grad()
def __call__( self : str , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = None , _UpperCAmelCase : np.ndarray = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = None , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : str=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
__lowercase = steps or self.get_default_steps()
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__lowercase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__lowercase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_UpperCAmelCase , device=self.device , )
__lowercase = noise
__lowercase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = self.mel.audio_slice_to_image(_UpperCAmelCase )
__lowercase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
__lowercase = (input_image / 2_55) * 2 - 1
__lowercase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__lowercase = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase , 0 ) ).latent_dist.sample(
generator=_UpperCAmelCase )[0]
__lowercase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
__lowercase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__lowercase = int(mask_start_secs * pixels_per_second )
__lowercase = int(mask_end_secs * pixels_per_second )
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _UpperCAmelCase ):
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )['sample']
else:
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
if isinstance(self.scheduler , _UpperCAmelCase ):
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
else:
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
if mask is not None:
if mask_start > 0:
__lowercase = mask[:, step, :, :mask_start]
if mask_end > 0:
__lowercase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__lowercase = 1 / self.vqvae.config.scaling_factor * images
__lowercase = self.vqvae.decode(_UpperCAmelCase )['sample']
__lowercase = (images / 2 + 0.5).clamp(0 , 1 )
__lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__lowercase = (images * 2_55).round().astype('uint8' )
__lowercase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) )
__lowercase = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_UpperCAmelCase ) )
@torch.no_grad()
def a__ ( self : Any , _UpperCAmelCase : List[Image.Image] , _UpperCAmelCase : int = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , _UpperCAmelCase )
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
__lowercase = (sample / 2_55) * 2 - 1
__lowercase = torch.Tensor(_UpperCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__lowercase = self.scheduler.alphas_cumprod[t]
__lowercase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__lowercase = 1 - alpha_prod_t
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
__lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def a__ ( _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : float ) -> torch.Tensor:
"""simple docstring"""
__lowercase = acos(torch.dot(torch.flatten(_UpperCAmelCase ) , torch.flatten(_UpperCAmelCase ) ) / torch.norm(_UpperCAmelCase ) / torch.norm(_UpperCAmelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(_UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(_UpperCAmelCase )
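# --- Illustrative sketch (added for clarity, not part of the original file) ---
# Standalone spherical interpolation (slerp), the same formula as the static
# method above: interpolate along the arc between two tensors, so alpha=0
# returns x0 and alpha=1 returns x1 (the cosine is clamped here for safety).
import math
import torch
def slerp_demo(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    cos_theta = float(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1)))
    theta = math.acos(max(-1.0, min(1.0, cos_theta)))
    return (math.sin((1 - alpha) * theta) * x0 + math.sin(alpha * theta) * x1) / math.sin(theta)
_a, _b = torch.randn(4), torch.randn(4)
assert torch.allclose(slerp_demo(_a, _b, 0.0), _a, atol=1e-6)
assert torch.allclose(slerp_demo(_a, _b, 1.0), _b, atol=1e-6)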
| 325 | 1 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def __magic_name__ ( __a : int , __a : Tuple ):
'''simple docstring'''
    return torch.atan2(__a , __a ) / math.pi * 2
def __magic_name__ ( __a : int ):
'''simple docstring'''
UpperCamelCase__ = torch.sin(t * math.pi / 2 ) ** 2
UpperCamelCase__ = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(__a , __a )
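# --- Illustrative sketch (added for clarity, not part of the original file) ---
# Round-trip check for the (alpha, sigma) -> t mapping above: with
# alpha = cos(t * pi / 2) and sigma = sin(t * pi / 2), atan2(sigma, alpha)
# recovers t * pi / 2, so dividing by pi and doubling recovers t.
import math
import torch
_t = torch.linspace(0.01, 0.99, steps=9)
_alpha = torch.cos(_t * math.pi / 2)
_sigma = torch.sin(_t * math.pi / 2)
assert torch.allclose(torch.atan2(_sigma, _alpha) / math.pi * 2, _t, atol=1e-5)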
class __A( __lowerCamelCase ):
"""simple docstring"""
pass
class __A( nn.Module ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ ):
super().__init__()
UpperCamelCase__ = DiffusionAttnUnetaD(SCREAMING_SNAKE_CASE_ , n_attn_layers=4 )
UpperCamelCase__ = deepcopy(self.diffusion )
UpperCamelCase__ = torch.quasirandom.SobolEngine(1 , scramble=SCREAMING_SNAKE_CASE_ )
def __magic_name__ ( __a : Dict ):
'''simple docstring'''
UpperCamelCase__ = MODELS_MAP[model_name]["""url"""]
os.system(f"wget {url} ./" )
return f"./{model_name}.ckpt"
lowerCamelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCamelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCamelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCamelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCamelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCamelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def __magic_name__ ( __a : Tuple ):
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f"ResConvBlock error with {name}" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __magic_name__ ( __a : List[str] ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(__a ) and not isinstance(__a , __a ):
return name.replace(__a , __a )
elif name.startswith(__a ):
return [name.replace(__a , __a ) for v in value]
raise ValueError(f"Attn error with {name}" )
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"
    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
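# Minimal example of the renaming scheme, grounded in the early-return above:
# >>> rename("timestep_embed.weight")
# 'time_proj.weight'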
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
main(args)
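# Example invocation (the script filename is assumed; the model names come
# from MODELS_MAP above):
# python convert_dance_diffusion_to_diffusers.py \
#     --model_path maestro-uncond-150k --checkpoint_path ./maestro-diffusers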
| 178 |
def gcd(a, b):
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a, m):
    # Modular inverse via the extended Euclidean algorithm; the helper name
    # `find_mod_inverse` is assumed (only `gcd` is fixed by its call site).
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
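# Worked example: 3 * 4 == 12 ≡ 1 (mod 11), so:
# >>> find_mod_inverse(3, 11)
# 4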
| 178 | 1 |
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints) -> list[int]:
    # LSD radix sort; the name `radix_sort` is assumed for the obfuscated
    # original.
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
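# Example (sorts in place and returns the list):
# >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
# [2, 24, 45, 66, 75, 90, 170, 802]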
if __name__ == "__main__":
import doctest
doctest.testmod()
| 257 |
def solution(limit=28123) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
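# With the default limit of 28123 this reproduces the Project Euler #23
# answer:
# >>> solution()
# 4179871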
if __name__ == "__main__":
print(solution())
| 257 | 1 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    # The test helper's name is assumed; only `or_gate` is fixed by its
    # call sites below.
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 68 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
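# With X applied to both qubits the outcome is deterministic, so the counts
# collapse to the all-ones bitstring, e.g. {'11': 1000}.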
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"Total count for various states are: {counts}")
| 68 | 1 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    # Gamma function via numerical integration; the names `gamma` and
    # `integrand` are assumed for the obfuscated originals.
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]
def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
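# Sanity check (Γ(n) == (n - 1)! for positive integers):
# >>> round(gamma(5))
# 24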
if __name__ == "__main__":
from doctest import testmod
testmod()
| 202 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
# The names `load_vqgan`, `reconstruct_with_vqgan` and `load_model` below are
# assumed for the obfuscated originals; the rest are fixed by their call sites.
def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
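# Hedged usage sketch (paths, device and the input tensor `x` are
# assumptions, not part of the original helpers):
# model = load_vqgan(torch.device("cpu"), conf_path="config.yaml", ckpt_path="vqgan.ckpt")
# xrec = reconstruct_with_vqgan(x, model)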
| 202 | 1 |
import qiskit
def quantum_entanglement(qubits: int = 2):
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(qubits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    return job.result().get_counts(circuit)
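# The circuit prepares a GHZ state, so (up to shot noise) only the all-zeros
# and all-ones bitstrings appear, e.g. roughly {'000': 500, '111': 500} for
# three qubits.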
if __name__ == "__main__":
print(f'Total count for various states are: {quantum_entanglement(3)}')
| 178 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.decode(greedy_ids[0] )
UpperCamelCase__ = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCamelCase__ = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
UpperCamelCase__ = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = greedy_ids[:, input_ids.shape[1] :]
UpperCamelCase__ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_prompt=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCamelCase__ = AutoTokenizer.from_pretrained("""distilgpt2""" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -1
UpperCamelCase__ = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
model.generate(SCREAMING_SNAKE_CASE_ , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE_ , streamer=SCREAMING_SNAKE_CASE_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCamelCase__ = cs.out[:-1] # Remove the final "\n"
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = TextIteratorStreamer(SCREAMING_SNAKE_CASE_ , timeout=0.001 )
UpperCamelCase__ = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCamelCase__ = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE_ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = """"""
for new_text in streamer:
streamer_text += new_text
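# The pattern exercised above, as a hedged stand-alone sketch (the checkpoint
# names are the tiny test models used in this file; `input_ids` is any
# tokenized prompt tensor):
# tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
# model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
# streamer = TextIteratorStreamer(tokenizer)
# Thread(target=model.generate, kwargs={"input_ids": input_ids, "max_new_tokens": 10, "streamer": streamer}).start()
# text = "".join(streamer)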
| 178 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    # Read-only filesystem view of a single compressed file. The class and
    # method names are assumed (the originals were obfuscated); the attribute
    # names are recoverable from their inline comments.
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")
    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}
    def cat(self, path: str):
        return self.file.open().read()
    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"
class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"
class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"
class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"
class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = "rb" , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = DEFAULT_BLOCK_SIZE , **lowerCAmelCase__ , ) -> Optional[Any]:
super().__init__(
fo=__lowercase , mode=__lowercase , target_protocol=__lowercase , target_options=__lowercase , block_size=__lowercase , **__lowercase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__magic_name__ : int = self.file.__enter__
class snake_case__ :
def __init__( self , lowerCAmelCase__ ) -> List[Any]:
__magic_name__ : Tuple = file_
def __enter__( self ) -> int:
self._file.__enter__()
return self
def __exit__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Dict:
self._file.__exit__(*__lowercase , **__lowercase )
def __iter__( self ) -> str:
return iter(self._file )
def __magic_name__ ( self ) -> Optional[Any]:
return next(self._file )
def __getattr__( self , lowerCAmelCase__ ) -> Tuple:
return getattr(self._file , __lowercase )
def fixed_enter(*lowerCAmelCase__ , **lowerCAmelCase__ ):
return WrappedFile(_enter(*__lowercase , **__lowercase ) )
__magic_name__ : List[str] = fixed_enter
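# Hedged usage sketch: each subclass exposes one compressed file as a
# single-file filesystem (the file name below is an assumption):
# fs = GzipFileSystem(fo="data.txt.gz")
# raw = fs.cat("data.txt")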
| 342 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self : Dict , __lowercase : int = 32 , __lowercase : int = 64 , __lowercase : int = 20 , __lowercase : int = 768 , __lowercase : Any=77 , __lowercase : Optional[int]=4 , __lowercase : float = 0.0 , __lowercase : str = "silu" , __lowercase : Optional[str] = None , __lowercase : Optional[str] = None , __lowercase : Optional[str] = "linear" , __lowercase : Optional[str] = "prd" , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , __lowercase : Optional[int] = None , ):
'''simple docstring'''
super().__init__()
__a = num_attention_heads
__a = attention_head_dim
__a = num_attention_heads * attention_head_dim
__a = additional_embeddings
__a = time_embed_dim or inner_dim
__a = embedding_proj_dim or embedding_dim
__a = clip_embed_dim or embedding_dim
__a = Timesteps(__lowercase , __lowercase , 0 )
__a = TimestepEmbedding(__lowercase , __lowercase , out_dim=__lowercase , act_fn=__lowercase )
__a = nn.Linear(__lowercase , __lowercase )
if embedding_proj_norm_type is None:
__a = None
elif embedding_proj_norm_type == "layer":
__a = nn.LayerNorm(__lowercase )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
__a = nn.Linear(__lowercase , __lowercase )
if encoder_hid_proj_type is None:
__a = None
elif encoder_hid_proj_type == "linear":
__a = nn.Linear(__lowercase , __lowercase )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
__a = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __lowercase ) )
if added_emb_type == "prd":
__a = nn.Parameter(torch.zeros(1 , 1 , __lowercase ) )
elif added_emb_type is None:
__a = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
__a = nn.ModuleList(
[
BasicTransformerBlock(
__lowercase , __lowercase , __lowercase , dropout=__lowercase , activation_fn="""gelu""" , attention_bias=__lowercase , )
for d in range(__lowercase )
] )
if norm_in_type == "layer":
__a = nn.LayerNorm(__lowercase )
elif norm_in_type is None:
__a = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
__a = nn.LayerNorm(__lowercase )
__a = nn.Linear(__lowercase , __lowercase )
__a = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
__a = causal_attention_mask[None, ...]
self.register_buffer("""causal_attention_mask""" , __lowercase , persistent=__lowercase )
__a = nn.Parameter(torch.zeros(1 , __lowercase ) )
__a = nn.Parameter(torch.zeros(1 , __lowercase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
__a = {}
def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ):
if hasattr(__lowercase , """set_processor""" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__lowercase , __lowercase , __lowercase )
return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
'''simple docstring'''
__a = len(self.attn_processors.keys() )
if isinstance(__lowercase , __lowercase ) and len(__lowercase ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(__lowercase )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict ):
if hasattr(__lowercase , """set_processor""" ):
if not isinstance(__lowercase , __lowercase ):
module.set_processor(__lowercase )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , __lowercase , __lowercase )
for name, module in self.named_children():
fn_recursive_attn_processor(__lowercase , __lowercase , __lowercase )
    def set_default_attn_processor(self):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
    def forward(self, hidden_states, timestep: Union[torch.Tensor, float, int], proj_embedding: torch.FloatTensor, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.BoolTensor] = None, return_dict: bool = True, ):
'''simple docstring'''
__a = hidden_states.shape[0]
__a = timestep
if not torch.is_tensor(__lowercase ):
__a = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__lowercase ) and len(timesteps.shape ) == 0:
__a = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__a = timesteps * torch.ones(__lowercase , dtype=timesteps.dtype , device=timesteps.device )
__a = self.time_proj(__lowercase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
__a = timesteps_projected.to(dtype=self.dtype )
__a = self.time_embedding(__lowercase )
if self.embedding_proj_norm is not None:
__a = self.embedding_proj_norm(__lowercase )
__a = self.embedding_proj(__lowercase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
__a = self.encoder_hidden_states_proj(__lowercase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""" )
__a = self.proj_in(__lowercase )
__a = self.positional_embedding.to(hidden_states.dtype )
__a = []
__a = 0
if encoder_hidden_states is not None:
additional_embeds.append(__lowercase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
__a = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
__a = hidden_states[:, None, :]
__a = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
__a = self.prd_embedding.to(hidden_states.dtype ).expand(__lowercase , -1 , -1 )
additional_embeds.append(__lowercase )
__a = torch.cat(
__lowercase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
__a = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
__a = F.pad(
__lowercase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
__a = hidden_states + positional_embeddings
if attention_mask is not None:
__a = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
__a = F.pad(__lowercase , (0, self.additional_embeddings) , value=0.0 )
__a = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
__a = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
__a = self.norm_in(__lowercase )
for block in self.transformer_blocks:
__a = block(__lowercase , attention_mask=__lowercase )
__a = self.norm_out(__lowercase )
if self.prd_embedding is not None:
__a = hidden_states[:, -1]
else:
__a = hidden_states[:, additional_embeddings_len:]
__a = self.proj_to_clip_embeddings(__lowercase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__lowercase )
    def post_process_latents(self, prior_latents):
'''simple docstring'''
__a = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
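# Hedged usage sketch (shapes follow the defaults above; the batch size and
# random embeddings are assumptions):
# prior = PriorTransformer()
# out = prior(
#     hidden_states=torch.randn(1, 768),
#     timestep=10,
#     proj_embedding=torch.randn(1, 768),
#     encoder_hidden_states=torch.randn(1, 77, 768),
# )
# image_embedding = out.predicted_image_embedding  # shape (1, 768)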
| 302 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _lowercase :
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def UpperCAmelCase_ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
__UpperCamelCase =[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =object_detector(examples[0] , threshold=0.0 )
__UpperCamelCase =len(UpperCamelCase__ )
self.assertGreater(UpperCamelCase__ , 0 )
self.assertEqual(
UpperCamelCase__ , [
{
'''score''': ANY(UpperCamelCase__ ),
'''label''': ANY(UpperCamelCase__ ),
'''box''': {'''xmin''': ANY(UpperCamelCase__ ), '''ymin''': ANY(UpperCamelCase__ ), '''xmax''': ANY(UpperCamelCase__ ), '''ymax''': ANY(UpperCamelCase__ )},
}
for i in range(UpperCamelCase__ )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
__UpperCamelCase =object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
] , )
__UpperCamelCase =object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 204, '''ymin''': 167, '''xmax''': 232, '''ymax''': 190}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 571, '''ymin''': 83, '''xmax''': 598, '''ymax''': 103}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 274, '''xmax''': 93, '''ymax''': 297}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 494, '''ymin''': 105, '''xmax''': 521, '''ymax''': 127}},
]
] , )
@require_torch
@slow
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
__UpperCamelCase =pipeline('''zero-shot-object-detection''' )
__UpperCamelCase =object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
] , )
__UpperCamelCase =object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 335, '''ymin''': 74, '''xmax''': 371, '''ymax''': 187}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 642, '''ymax''': 476}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def UpperCAmelCase_ ( self : str ) -> Dict:
'''simple docstring'''
pass
@require_torch
@slow
def UpperCAmelCase_ ( self : int ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =0.2
__UpperCamelCase =pipeline('''zero-shot-object-detection''' )
__UpperCamelCase =object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=UpperCamelCase__ , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 315, '''ymax''': 472}},
] , )
@require_torch
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
__UpperCamelCase =2
__UpperCamelCase =pipeline('''zero-shot-object-detection''' )
__UpperCamelCase =object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=UpperCamelCase__ , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 324, '''ymin''': 20, '''xmax''': 640, '''ymax''': 373}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 177, '''ymax''': 115}},
] , )
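# The API exercised above, as a hedged stand-alone sketch (grounded in the
# slow tests in this file):
# detector = pipeline("zero-shot-object-detection")
# preds = detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote", "couch"],
# )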
| 369 |
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    # The helper name `rename_key` is assumed for the obfuscated original.
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
                    :dim
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
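# Example invocation (the script filename is assumed):
# python convert_swin_timm_to_pytorch.py \
#     --swin_name swin_tiny_patch4_window7_224 --pytorch_dump_folder_path ./swin-tiny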
| 85 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = TextToVideoSDPipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCAmelCase = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def _UpperCamelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
snake_case_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
snake_case_ = CLIPTextModel(a )
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def _UpperCamelCase ( self , a , a=0 ) -> Tuple:
if str(a ).startswith('mps' ):
snake_case_ = torch.manual_seed(a )
else:
snake_case_ = torch.Generator(device=a ).manual_seed(a )
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = TextToVideoSDPipeline(**a )
snake_case_ = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
snake_case_ = self.get_dummy_inputs(a )
snake_case_ = 'np'
snake_case_ = sd_pipe(**a ).frames
snake_case_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
snake_case_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self ) -> List[str]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=a , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _UpperCamelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a , expected_max_diff=1E-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _UpperCamelCase ( self ) -> Tuple:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def _UpperCamelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def _UpperCamelCase ( self ) -> Optional[int]:
pass
def _UpperCamelCase ( self ) -> List[Any]:
return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> str:
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
snake_case_ = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
snake_case_ = pipe.to('cuda' )
snake_case_ = 'Spiderman is surfing'
snake_case_ = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case_ = pipe(a , generator=a , num_inference_steps=25 , output_type='pt' ).frames
snake_case_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def _UpperCamelCase ( self ) -> Optional[Any]:
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
snake_case_ = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
snake_case_ = pipe.to('cuda' )
snake_case_ = 'Spiderman is surfing'
snake_case_ = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case_ = pipe(a , generator=a , num_inference_steps=2 , output_type='pt' ).frames
snake_case_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
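# Hedged sketch of the pipeline under test (grounded in the slow tests above):
# pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
# frames = pipe("Spiderman is surfing", num_inference_steps=25, output_type="pt").frames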
| 178 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user, train_match, test_match) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train, x_test, train_user) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user) -> float:
    # The name `interquartile_range_checker` is assumed; the helper is not
    # called elsewhere in this file.
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote, actual_result) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe += 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
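# Example: two of three forecasts within 0.1 of the actual value count as
# safe, so the vote passes:
# >>> data_safety_checker([0.50, 0.52, 0.48], 0.51)
# True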
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
| 178 | 1 |
from statistics import mean, stdev
def normalization(data, ndigits: int = 3):
    # Min-max rescaling; the names `normalization`/`standardization` are
    # assumed for the obfuscated originals.
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]
def standardization(data, ndigits: int = 3):
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
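# Examples:
# >>> normalization([1, 2, 3])
# [0.0, 0.5, 1.0]
# >>> standardization([1, 2, 3])
# [-1.0, 0.0, 1.0]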
| 364 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
SCREAMING_SNAKE_CASE = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a , cache_dir=a)
SCREAMING_SNAKE_CASE = [t[-1] for t in os.walk(os.path.join(a , os.listdir(a)[0] , 'snapshots'))]
SCREAMING_SNAKE_CASE = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_51_47_45) < 1E-3
assert np.abs(np.abs(a , dtype=np.floataa).sum() - 4_99_47.8_75) < 5E-1
SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(a) == num_samples
def SCREAMING_SNAKE_CASE__ ( self) -> int:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_65_24_01)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_38_38_08.2)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa)
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_00_39_06)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_37_35_16.75)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
SCREAMING_SNAKE_CASE = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=a , steps_offset=1 , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=a , safety_checker=a , )
SCREAMING_SNAKE_CASE = scheduler.create_state()
SCREAMING_SNAKE_CASE = scheduler_state
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = jax.random.split(a , a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_45_04_39_45)) < 1E-3
assert np.abs((np.abs(a , dtype=np.floataa).sum() - 2_34_76_93.5)) < 5E-1
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0) , a)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a , )
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , jit=a).images
assert images.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=a , use_memory_efficient_attention=a , )
SCREAMING_SNAKE_CASE = replicate(a)
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(a)
SCREAMING_SNAKE_CASE = shard(a)
SCREAMING_SNAKE_CASE = pipeline(a , a , a , jit=a).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1E-2
| 327 | 0 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
        vocab_tokens = [
"<unk>",
"[CLS]",
"[SEP]",
"want",
"unwanted",
"wa",
"un",
"running",
",",
"low",
"l",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> List[Any]:
'''simple docstring'''
A__ = "<unk> UNwanted , running"
A__ = "<unk> unwanted, running"
return input_text, output_text
    def test_full_tokenizer( self ) -> Tuple:
'''simple docstring'''
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize("<unk> UNwanted , running" )
        self.assertListEqual(tokens , ["<unk>", "unwanted", ",", "running"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
    def test_full_tokenizer_lower( self ) -> List[Any]:
'''simple docstring'''
        tokenizer = TransfoXLTokenizer(lower_case=True )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["hello", "!", "how", "are", "you", "?"] )
    def test_full_tokenizer_no_lower( self ) -> Optional[Any]:
'''simple docstring'''
        tokenizer = TransfoXLTokenizer(lower_case=False )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
    def test_full_tokenizer_moses( self ) -> Dict:
'''simple docstring'''
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
"Hello",
"(",
"bracket",
")",
"and",
"side",
"@-@",
"scrolled",
"[",
"and",
"]",
"Henry",
"'s",
"$",
"5",
"@,@",
"000",
"with",
"3",
"@.@",
"34",
"m",
".",
"What",
"'s",
"up",
"!",
"?",
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
    def test_move_added_token( self ) -> Dict:
'''simple docstring'''
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
        tokenizer.add_tokens(["new1", "new2"] )
        tokenizer.move_added_token("new1" , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("new1" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , "new1" )
| 68 |
import random
class Onepad:
    """simple docstring"""

    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        '''simple docstring'''
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        '''simple docstring'''
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
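# Why decryption recovers the plaintext: each ciphertext value is
# c = (p + k) * k = p*k + k**2, so (c - k**2) / k gives back p exactly.
# Quick round-trip sanity check (illustrative):
#     c, k = Onepad().encrypt("abc")
#     assert Onepad().decrypt(c, k) == "abc"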
| 68 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Any = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
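# In short: at import time the package is replaced by a _LazyModule built from
# _import_structure, so the heavy framework imports above (sentencepiece,
# tokenizers, torch, flax, tf) only run for the symbols a caller actually
# requests.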
| 231 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Union[str, Any] = """▁"""
_lowerCamelCase : Optional[Any] = {"""vocab_file""": """spiece.model"""}
_lowerCamelCase : str = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
_lowerCamelCase : List[str] = {
"""google/pegasus-xsum""": 512,
}
_lowerCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase_ ( PreTrainedTokenizer ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str="<pad>" , UpperCAmelCase__ : Optional[Any]="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Union[str, Any]="<mask_2>" , UpperCAmelCase__ : List[str]="<mask_1>" , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=103 , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Dict , ) ->None:
'''simple docstring'''
A__ = offset
if additional_special_tokens is not None:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__):
raise TypeError(
f"""additional_special_tokens should be of type {type(UpperCAmelCase__)}, but is"""
f""" {type(UpperCAmelCase__)}""")
A__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(UpperCAmelCase__) , self.offset - 1)
]
if len(set(UpperCAmelCase__)) != len(UpperCAmelCase__):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""")
A__ = additional_special_tokens_extended
else:
A__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset)]
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token_sent=UpperCAmelCase__ , offset=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
A__ = mask_token_sent
A__ = vocab_file
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(UpperCAmelCase__)
# add special tokens to encoder dict
A__ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
})
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1)})
A__ = {v: k for k, v in self.encoder.items()}
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
return len(self.sp_model) + self.offset
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict[str, int]:
'''simple docstring'''
A__ = {self.convert_ids_to_tokens(UpperCAmelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Any) ->Union[str, Any]:
'''simple docstring'''
A__ = self.__dict__.copy()
A__ = None
return state
def __setstate__( self : int , UpperCAmelCase__ : Optional[int]) ->Optional[int]:
'''simple docstring'''
A__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : str) ->List[str]:
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : str) ->int:
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
A__ = self.sp_model.piece_to_id(UpperCAmelCase__)
return sp_id + self.offset
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : int) ->str:
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
A__ = self.sp_model.IdToPiece(index - self.offset)
return token
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int) ->Optional[int]:
'''simple docstring'''
A__ = []
A__ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(UpperCAmelCase__) + token
A__ = []
else:
current_sub_tokens.append(UpperCAmelCase__)
out_string += self.sp_model.decode(UpperCAmelCase__)
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int]=False) ->Union[str, Any]:
'''simple docstring'''
return 1
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
A__ = set(self.all_special_ids) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List , UpperCAmelCase__ : Optional[List] = None , UpperCAmelCase__ : bool = False) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(UpperCAmelCase__)
elif token_ids_a is None:
return self._special_token_mask(UpperCAmelCase__) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a) + [1]
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[str]=None) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase__):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
A__ = os.path.join(
UpperCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase__ , '''wb''') as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__)
return (out_vocab_file,)
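# Minimal usage sketch for the tokenizer above (a port of PegasusTokenizer;
# assumes the public "google/pegasus-xsum" checkpoint is reachable):
#     tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#     ids = tokenizer("Sample input.").input_ids  # ends with eos_token_id == 1
#     text = tokenizer.decode(ids, skip_special_tokens=True)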
| 231 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 178 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_image_classification_head( self ) -> int:
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model.to(torch_device )

        from datasets import load_dataset

        dataset = load_dataset('nielsr/rvlcdip-demo' )

        image = dataset['train'][0]['image'].convert('RGB' )

        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits

        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
| 178 | 1 |
"""simple docstring"""
def palindromic_string(input_string: str) -> str:
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
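# Illustrative behaviour of palindromic_string (expected values follow the
# standard Manacher algorithm):
#     >>> palindromic_string("abbbaba")
#     'abbba'
#     >>> palindromic_string("ababa")
#     'ababa'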
if __name__ == "__main__":
import doctest
doctest.testmod() | 362 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class A__ ( PretrainedConfig):
    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _SCREAMING_SNAKE_CASE=12_81_12 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=40_96 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=40_96 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="float32" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1_28 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.001 , _SCREAMING_SNAKE_CASE=0.001 , _SCREAMING_SNAKE_CASE="all" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=0.2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = vocab_size
__lowerCAmelCase : str = max_position_embeddings
__lowerCAmelCase : Dict = d_model
__lowerCAmelCase : Tuple = encoder_ffn_dim
__lowerCAmelCase : Optional[Any] = encoder_layers
__lowerCAmelCase : Any = encoder_attention_heads
__lowerCAmelCase : Tuple = decoder_ffn_dim
__lowerCAmelCase : Dict = decoder_layers
__lowerCAmelCase : str = decoder_attention_heads
__lowerCAmelCase : str = dropout
__lowerCAmelCase : List[str] = attention_dropout
__lowerCAmelCase : Optional[int] = activation_dropout
__lowerCAmelCase : List[Any] = activation_function
__lowerCAmelCase : List[str] = init_std
__lowerCAmelCase : Union[str, Any] = encoder_layerdrop
__lowerCAmelCase : List[Any] = decoder_layerdrop
__lowerCAmelCase : Optional[int] = use_cache
__lowerCAmelCase : Optional[Any] = encoder_layers
__lowerCAmelCase : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase : Union[str, Any] = router_z_loss_coef
__lowerCAmelCase : Optional[Any] = router_aux_loss_coef
__lowerCAmelCase : int = decoder_sparse_step
__lowerCAmelCase : str = encoder_sparse_step
__lowerCAmelCase : Tuple = num_experts
__lowerCAmelCase : Dict = expert_capacity
__lowerCAmelCase : Union[str, Any] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
__lowerCAmelCase : Union[str, Any] = router_dtype
__lowerCAmelCase : Any = router_ignore_padding_tokens
__lowerCAmelCase : str = batch_prioritized_routing
__lowerCAmelCase : Tuple = second_expert_policy
__lowerCAmelCase : List[str] = normalize_router_prob_before_dropping
__lowerCAmelCase : Dict = moe_eval_capacity_token_fraction
__lowerCAmelCase : Union[str, Any] = moe_token_dropout
__lowerCAmelCase : List[Any] = output_router_logits
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) | 182 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
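# The contract exercised above: DisjunctiveConstraint.update(token_id) returns
# a (stepped, completed, reset) triple -- stepped: the token advanced one of
# the candidate sequences; completed: some candidate is now fully matched;
# reset: the token broke the partial match and progress was discarded.
# A minimal end-to-end walk (illustrative; runs only when torch is available):
if __name__ == "__main__" and is_torch_available():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    assert dc.completed and dc.current_seq == [1, 2, 4]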
| 106 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class BridgeTowerVisionConfig( PretrainedConfig ):
    model_type = "bridgetower_vision_model"
def __init__( self , a__=768 , a__=12 , a__=3 , a__=16 , a__=288 , a__=1 , a__=1e-05 , a__=False , a__=True , a__=False , **a__ , ) -> int:
'''simple docstring'''
super().__init__(**a__ )
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_channels
snake_case_ = patch_size
snake_case_ = image_size
snake_case_ = initializer_factor
snake_case_ = layer_norm_eps
snake_case_ = stop_gradient
snake_case_ = share_layernorm
snake_case_ = remove_last_layer
@classmethod
def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig":
'''simple docstring'''
snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ )
if config_dict.get("model_type" ) == "bridgetower":
snake_case_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a__ , **a__ )
class BridgeTowerTextConfig( PretrainedConfig ):
    model_type = "bridgetower_text_model"
def __init__( self , a__=50_265 , a__=768 , a__=12 , a__=12 , a__=1 , a__=3_072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=514 , a__=1 , a__=1e-05 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , **a__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**a__ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = hidden_act
snake_case_ = initializer_factor
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = position_embedding_type
snake_case_ = use_cache
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = eos_token_id
@classmethod
def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig":
'''simple docstring'''
snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ )
if config_dict.get("model_type" ) == "bridgetower":
snake_case_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a__ , **a__ )
class BridgeTowerConfig( PretrainedConfig ):
    model_type = "bridgetower"
def __init__( self , a__=True , a__="gelu" , a__=768 , a__=1 , a__=1e-05 , a__=False , a__="add" , a__=12 , a__=6 , a__=False , a__=False , a__=None , a__=None , **a__ , ) -> int:
'''simple docstring'''
snake_case_ = kwargs.pop("text_config_dict" , a__ )
snake_case_ = kwargs.pop("vision_config_dict" , a__ )
super().__init__(**a__ )
snake_case_ = share_cross_modal_transformer_layers
snake_case_ = hidden_act
snake_case_ = hidden_size
snake_case_ = initializer_factor
snake_case_ = layer_norm_eps
snake_case_ = share_link_tower_layers
snake_case_ = link_tower_type
snake_case_ = num_attention_heads
snake_case_ = num_hidden_layers
snake_case_ = tie_word_embeddings
snake_case_ = init_layernorm_from_vision_encoder
if text_config is None:
snake_case_ = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
snake_case_ = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
snake_case_ = BridgeTowerTextConfig(**a__ )
snake_case_ = BridgeTowerVisionConfig(**a__ )
@classmethod
def lowerCAmelCase__ ( cls , a__ , a__ , **a__ ) -> List[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.text_config.to_dict()
snake_case_ = self.vision_config.to_dict()
snake_case_ = self.__class__.model_type
return output
| 85 | 0 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def convert_classification(base_model_name: str, hf_config, downstream_dict: dict):
    '''simple docstring'''
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model


def convert_diarization(base_model_name: str, hf_config, downstream_dict: dict):
    '''simple docstring'''
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model


def convert_xvector(base_model_name: str, hf_config, downstream_dict: dict):
    '''simple docstring'''
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model


@torch.no_grad()
def convert_saprl_checkpoint(base_model_name: str, config_path: str, checkpoint_path: str, model_dump_path: str):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    downstream_dict = checkpoint['Downstream']
    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__UpperCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
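# Example invocation (the script file name and all paths below are hypothetical):
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream_checkpoint.pt \
#       --model_dump_path ./converted_model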
| 366 |
def ugly_numbers(n: int) -> int:
    '''simple docstring'''
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'{ugly_numbers(200) = }')
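# The sequence produced is 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... so, for example,
# ugly_numbers(10) == 12. Each of the three candidate pointers advances lazily,
# giving O(n) time and O(n) space.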
| 145 | 0 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self : Tuple ) -> Dict:
"""simple docstring"""
__magic_name__ = 'laion/clap-htsat-unfused'
__magic_name__ = tempfile.mkdtemp()
def _lowercase ( self : Union[str, Any] , **UpperCamelCase__ : str ) -> int:
"""simple docstring"""
return RobertaTokenizer.from_pretrained(self.checkpoint , **_A )
def _lowercase ( self : List[str] , **UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **_A )
def _lowercase ( self : Tuple ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_feature_extractor()
__magic_name__ = ClapProcessor(tokenizer=_A , feature_extractor=_A )
processor.save_pretrained(self.tmpdirname )
__magic_name__ = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__magic_name__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__magic_name__ = self.get_feature_extractor(do_normalize=_A , padding_value=1.0 )
__magic_name__ = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _A )
def _lowercase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = ClapProcessor(tokenizer=_A , feature_extractor=_A )
__magic_name__ = floats_list((3, 1000) )
__magic_name__ = feature_extractor(_A , return_tensors="""np""" )
__magic_name__ = processor(audios=_A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowercase ( self : Dict ) -> str:
"""simple docstring"""
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = ClapProcessor(tokenizer=_A , feature_extractor=_A )
__magic_name__ = 'This is a test string'
__magic_name__ = processor(text=_A )
__magic_name__ = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : int ) -> int:
"""simple docstring"""
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = ClapProcessor(tokenizer=_A , feature_extractor=_A )
__magic_name__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__magic_name__ = processor.batch_decode(_A )
__magic_name__ = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def _lowercase ( self : Any ) -> Any:
"""simple docstring"""
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = ClapProcessor(tokenizer=_A , feature_extractor=_A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 88 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
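# Worked example: a 90 degree arc of a circle with radius 10 covers a quarter
# of the circumference, i.e. 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.708.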
| 327 | 0 |
def nor_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return int(input_1 == input_2 == 0)


def main() -> None:
    '''simple docstring'''
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(F"| 0 | 0 | {nor_gate(0, 0 )} |" )
print(F"| 0 | 1 | {nor_gate(0, 1 )} |" )
print(F"| 1 | 0 | {nor_gate(1, 0 )} |" )
print(F"| 1 | 1 | {nor_gate(1, 1 )} |" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 352 |
'''simple docstring'''
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    '''simple docstring'''
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
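# Note: Python's built-in three-argument pow performs modular exponentiation
# natively, so _modexpt(base, exponent, m) is essentially pow(base, exponent, m);
# the hand-rolled version above just keeps the square-and-multiply recursion
# explicit. solution() keeps only the last `digits` digits of the tetration
# tower base^^height.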
| 72 | 0 |
import pytest
_A = "__dummy_dataset1__"
_A = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name():
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    """simple docstring"""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 231 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
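# The slicing above splits timm's fused QKV projection into separate query,
# key and value matrices; the same idea standalone (hidden size is illustrative):
#     import torch
#     hidden_size = 4
#     in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
#     q = in_proj_weight[:hidden_size, :]
#     k = in_proj_weight[hidden_size : 2 * hidden_size, :]
#     v = in_proj_weight[-hidden_size:, :]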
def rename_key( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_deit_checkpoint( deit_name , pytorch_dump_folder_path ):
    """simple docstring"""
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4] )
    config.image_size = int(deit_name[-3:] )
    # size of the architecture
    if deit_name[9:].startswith("tiny" ):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small" ):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base" ):
        pass
    elif deit_name[4:].startswith("large" ):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(deit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size , crop_size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_A = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
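# Example invocation (the script file name and output path are hypothetical):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled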
| 231 | 1 |
import os
def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from the top left to the bottom right of the
    grid, moving only right and down."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]
    # cells in the first row and first column have a single incoming path
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    # every other cell extends the cheaper of its top and left neighbours
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
return dp[-1][-1]
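# Hand-checked example of the DP above (not part of the Project Euler input):
# for the grid [[1, 3], [2, 4]] the cheapest top-left -> bottom-right path is
# 1 -> 2 -> 4 with cost 7, which is exactly what dp[-1][-1] evaluates to.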
if __name__ == "__main__":
print(F"""{solution() = }""")
| 355 |
'''simple docstring'''
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 220 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
        'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'YolosForObjectDetection',
        'YolosModel',
        'YolosPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
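# With this registration, importing the package stays cheap: an import such as
# `from transformers.models.yolos import YolosModel` only triggers the heavy
# torch-dependent module on first attribute access (illustrative note; the
# exact module path depends on where this __init__.py lives).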
| 12 | import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
return orig_key
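# Example of the rewrite chain above (hand-traced, illustrative key name):
#   "model.transformer_0.mha.W_q.weight"
#     -> "yoso.encoder.layer.0.attention.self.query.weight"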
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
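# Example invocation (script and file names are illustrative):
#   python convert_yoso_checkpoint.py --pytorch_model_path yoso.ckpt \
#       --config_file yoso_config.json --pytorch_dump_path ./yoso-converted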
| 182 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase = logging.get_logger(__name__)
# General docstring
lowerCAmelCase = '''RegNetConfig'''
# Base docstring
lowerCAmelCase = '''facebook/regnet-y-040'''
lowerCAmelCase = [1, 1_0_8_8, 7, 7]
# Image classification docstring
lowerCAmelCase = '''facebook/regnet-y-040'''
lowerCAmelCase = '''tabby, tabby cat'''
lowerCAmelCase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
def __init__(self , lowerCAmelCase , lowerCAmelCase = 3 , lowerCAmelCase = 1 , lowerCAmelCase = 1 , lowerCAmelCase = "relu" , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__lowercase= tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__lowercase= tf.keras.layers.ConvaD(
filters=lowerCAmelCase , kernel_size=lowerCAmelCase , strides=lowerCAmelCase , padding='VALID' , groups=lowerCAmelCase , use_bias=lowerCAmelCase , name='convolution' , )
__lowercase= tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
__lowercase= ACTaFN[activation] if activation is not None else tf.identity
def _A (self , lowerCAmelCase ):
__lowercase= self.convolution(self.padding(lowerCAmelCase ) )
__lowercase= self.normalization(lowerCAmelCase )
__lowercase= self.activation(lowerCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__(self , lowerCAmelCase , **lowerCAmelCase ):
super().__init__(**lowerCAmelCase )
__lowercase= config.num_channels
__lowercase= TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def _A (self , lowerCAmelCase ):
__lowercase= shape_list(lowerCAmelCase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__lowercase= tf.transpose(lowerCAmelCase , perm=(0, 2, 3, 1) )
__lowercase= self.embedder(lowerCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__(self , lowerCAmelCase , lowerCAmelCase = 2 , **lowerCAmelCase ):
super().__init__(**lowerCAmelCase )
__lowercase= tf.keras.layers.ConvaD(
filters=lowerCAmelCase , kernel_size=1 , strides=lowerCAmelCase , use_bias=lowerCAmelCase , name='convolution' )
__lowercase= tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def _A (self , lowerCAmelCase , lowerCAmelCase = False ):
return self.normalization(self.convolution(lowerCAmelCase ) , training=lowerCAmelCase )
class A ( tf.keras.layers.Layer ):
def __init__(self , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ):
super().__init__(**lowerCAmelCase )
__lowercase= tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCAmelCase , name='pooler' )
__lowercase= [
tf.keras.layers.ConvaD(filters=lowerCAmelCase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowerCAmelCase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def _A (self , lowerCAmelCase ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__lowercase= self.pooler(lowerCAmelCase )
for layer_module in self.attention:
__lowercase= layer_module(lowerCAmelCase )
__lowercase= hidden_state * pooled
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1 , **lowerCAmelCase ):
super().__init__(**lowerCAmelCase )
__lowercase= in_channels != out_channels or stride != 1
__lowercase= max(1 , out_channels // config.groups_width )
__lowercase= (
TFRegNetShortCut(lowerCAmelCase , stride=lowerCAmelCase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__lowercase= [
TFRegNetConvLayer(lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowerCAmelCase , stride=lowerCAmelCase , groups=lowerCAmelCase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase , name='layer.2' ),
]
__lowercase= ACTaFN[config.hidden_act]
def _A (self , lowerCAmelCase ):
__lowercase= hidden_state
for layer_module in self.layers:
__lowercase= layer_module(lowerCAmelCase )
__lowercase= self.shortcut(lowerCAmelCase )
hidden_state += residual
__lowercase= self.activation(lowerCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1 , **lowerCAmelCase ):
super().__init__(**lowerCAmelCase )
__lowercase= in_channels != out_channels or stride != 1
__lowercase= max(1 , out_channels // config.groups_width )
__lowercase= (
TFRegNetShortCut(lowerCAmelCase , stride=lowerCAmelCase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
__lowercase= [
TFRegNetConvLayer(lowerCAmelCase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowerCAmelCase , stride=lowerCAmelCase , groups=lowerCAmelCase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowerCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowerCAmelCase , kernel_size=1 , activation=lowerCAmelCase , name='layer.3' ),
]
__lowercase= ACTaFN[config.hidden_act]
def _A (self , lowerCAmelCase ):
__lowercase= hidden_state
for layer_module in self.layers:
__lowercase= layer_module(lowerCAmelCase )
__lowercase= self.shortcut(lowerCAmelCase )
hidden_state += residual
__lowercase= self.activation(lowerCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__(self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 2 , lowerCAmelCase = 2 , **lowerCAmelCase ):
super().__init__(**lowerCAmelCase )
__lowercase= TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
__lowercase= [
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , stride=lowerCAmelCase , name='layers.0' ),
*[layer(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , name=f'layers.{i+1}' ) for i in range(depth - 1 )],
]
def _A (self , lowerCAmelCase ):
for layer_module in self.layers:
__lowercase= layer_module(lowerCAmelCase )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__(self , lowerCAmelCase , **lowerCAmelCase ):
super().__init__(**lowerCAmelCase )
__lowercase= []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
__lowercase= zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCAmelCase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , depth=lowerCAmelCase , name=f'stages.{i+1}' ) )
def _A (self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = True ):
__lowercase= () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowercase= hidden_states + (hidden_state,)
__lowercase= stage_module(lowerCAmelCase )
if output_hidden_states:
__lowercase= hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase , hidden_states=lowerCAmelCase )
@keras_serializable
class A ( tf.keras.layers.Layer ):
UpperCamelCase_ : Union[str, Any] =RegNetConfig
def __init__(self , lowerCAmelCase , **lowerCAmelCase ):
super().__init__(**lowerCAmelCase )
__lowercase= config
__lowercase= TFRegNetEmbeddings(lowerCAmelCase , name='embedder' )
__lowercase= TFRegNetEncoder(lowerCAmelCase , name='encoder' )
__lowercase= tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCAmelCase , name='pooler' )
@unpack_inputs
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , ):
__lowercase= (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase= return_dict if return_dict is not None else self.config.use_return_dict
__lowercase= self.embedder(lowerCAmelCase , training=lowerCAmelCase )
__lowercase= self.encoder(
lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase , training=lowerCAmelCase )
__lowercase= encoder_outputs[0]
__lowercase= self.pooler(lowerCAmelCase )
# Change to NCHW output format have uniformity in the modules
__lowercase= tf.transpose(lowerCAmelCase , perm=(0, 3, 1, 2) )
__lowercase= tf.transpose(lowerCAmelCase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__lowercase= tuple([tf.transpose(lowerCAmelCase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase , pooler_output=lowerCAmelCase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( A_ ):
UpperCamelCase_ : Optional[Any] =RegNetConfig
UpperCamelCase_ : Optional[int] ='''regnet'''
UpperCamelCase_ : Union[str, Any] ='''pixel_values'''
@property
def _A (self ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
lowerCAmelCase = R'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowerCAmelCase = R'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , A_ , )
class A ( A_ ):
def __init__(self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase )
__lowercase= TFRegNetMainLayer(lowerCAmelCase , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _A (self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase=False , ):
__lowercase= (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase= return_dict if return_dict is not None else self.config.use_return_dict
__lowercase= self.regnet(
pixel_values=lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase , training=lowerCAmelCase , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , A_ , )
class A ( A_ , A_ ):
def __init__(self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase )
__lowercase= config.num_labels
__lowercase= TFRegNetMainLayer(lowerCAmelCase , name='regnet' )
# classification head
__lowercase= [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _A (self , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase=False , ):
__lowercase= (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase= return_dict if return_dict is not None else self.config.use_return_dict
__lowercase= self.regnet(
lowerCAmelCase , output_hidden_states=lowerCAmelCase , return_dict=lowerCAmelCase , training=lowerCAmelCase )
__lowercase= outputs.pooler_output if return_dict else outputs[1]
__lowercase= self.classifier[0](lowerCAmelCase )
__lowercase= self.classifier[1](lowerCAmelCase )
__lowercase= None if labels is None else self.hf_compute_loss(labels=lowerCAmelCase , logits=lowerCAmelCase )
if not return_dict:
__lowercase= (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCAmelCase , logits=lowerCAmelCase , hidden_states=outputs.hidden_states )
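# Sketch of typical inference with the classification head defined above
# (assumes the "facebook/regnet-y-040" weights referenced in the docstrings
# are available on the Hub):
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   predicted_label = int(tf.math.argmax(model(**inputs).logits, axis=-1))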
| 363 |
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
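# Hand trace for year 2000 (illustrative): days_to_add = 29 and
# days_from_phm_to_sunday = 3, so the result is March 22 + 32 days = April 23,
# matching the historical date of Easter 2000.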
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
    tense = "will be" if year > datetime.now().year else "was"
print(F'Easter in {year} {tense} {gauss_easter(year)}')
| 304 | 0 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times num must be replaced by the product of its digits
    before it becomes a single digit."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
return steps
def additive_persistence(num: int) -> int:
    """Return how many times num must be replaced by the sum of its digits
    before it becomes a single digit."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
return steps
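# Worked examples (hand-checked):
#   multiplicative_persistence(39) -> 3   # 39 -> 27 -> 14 -> 4
#   additive_persistence(199)      -> 3   # 199 -> 19 -> 10 -> 1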
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | '''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
@require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
@require_tf
    def test_small_model_tf(self):
pass | 145 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
__lowerCamelCase : Optional[Any] = '''CompVis/stable-diffusion-v1-1'''
__lowerCamelCase : int = '''CompVis/stable-diffusion-v1-2'''
__lowerCamelCase : List[str] = '''CompVis/stable-diffusion-v1-3'''
__lowerCamelCase : List[Any] = '''CompVis/stable-diffusion-v1-4'''
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : AutoencoderKL , __A : CLIPTextModel , __A : CLIPTokenizer , __A : UNetaDConditionModel , __A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __A : StableDiffusionSafetyChecker , __A : CLIPImageProcessor , __A : bool = True , ):
        super().__init__()
snake_case__ : List[str] = StableDiffusionPipeline.from_pretrained(__a )
snake_case__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(__a )
snake_case__ : Dict = StableDiffusionPipeline.from_pretrained(__a )
snake_case__ : Optional[Any] = StableDiffusionPipeline(
vae=__a , text_encoder=__a , tokenizer=__a , unet=__a , scheduler=__a , safety_checker=__a , feature_extractor=__a , requires_safety_checker=__a , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _lowercase ( self : Optional[int] ):
return {k: getattr(self , __a ) for k in self.config.keys() if not k.startswith("_" )}
def _lowercase ( self : Tuple , __A : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case__ : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
def _lowercase ( self : List[str] ):
self.enable_attention_slicing(__a )
@torch.no_grad()
def _lowercase ( self : Union[str, Any] , __A : Union[str, List[str]] , __A : int = 5_1_2 , __A : int = 5_1_2 , __A : int = 5_0 , __A : float = 7.5 , __A : Optional[Union[str, List[str]]] = None , __A : Optional[int] = 1 , __A : float = 0.0 , __A : Optional[torch.Generator] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[str] = "pil" , __A : bool = True , __A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A : int = 1 , **__A : Dict , ):
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def _lowercase ( self : Optional[int] , __A : Union[str, List[str]] , __A : int = 5_1_2 , __A : int = 5_1_2 , __A : int = 5_0 , __A : float = 7.5 , __A : Optional[Union[str, List[str]]] = None , __A : Optional[int] = 1 , __A : float = 0.0 , __A : Optional[torch.Generator] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[str] = "pil" , __A : bool = True , __A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A : int = 1 , **__A : Any , ):
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def _lowercase ( self : str , __A : Union[str, List[str]] , __A : int = 5_1_2 , __A : int = 5_1_2 , __A : int = 5_0 , __A : float = 7.5 , __A : Optional[Union[str, List[str]]] = None , __A : Optional[int] = 1 , __A : float = 0.0 , __A : Optional[torch.Generator] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[str] = "pil" , __A : bool = True , __A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A : int = 1 , **__A : int , ):
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def _lowercase ( self : List[str] , __A : Union[str, List[str]] , __A : int = 5_1_2 , __A : int = 5_1_2 , __A : int = 5_0 , __A : float = 7.5 , __A : Optional[Union[str, List[str]]] = None , __A : Optional[int] = 1 , __A : float = 0.0 , __A : Optional[torch.Generator] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[str] = "pil" , __A : bool = True , __A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A : int = 1 , **__A : Optional[int] , ):
return self.pipea(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
@torch.no_grad()
def _lowercase ( self : List[Any] , __A : Union[str, List[str]] , __A : int = 5_1_2 , __A : int = 5_1_2 , __A : int = 5_0 , __A : float = 7.5 , __A : Optional[Union[str, List[str]]] = None , __A : Optional[int] = 1 , __A : float = 0.0 , __A : Optional[torch.Generator] = None , __A : Optional[torch.FloatTensor] = None , __A : Optional[str] = "pil" , __A : bool = True , __A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A : int = 1 , **__A : int , ):
snake_case__ : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(__a )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
snake_case__ : str = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get first result from Stable Diffusion Checkpoint v1.2
snake_case__ : Optional[Any] = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get first result from Stable Diffusion Checkpoint v1.3
snake_case__ : List[str] = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get first result from Stable Diffusion Checkpoint v1.4
snake_case__ : Union[str, Any] = self.textaimg_sda_a(
prompt=__a , height=__a , width=__a , num_inference_steps=__a , guidance_scale=__a , negative_prompt=__a , num_images_per_prompt=__a , eta=__a , generator=__a , latents=__a , output_type=__a , return_dict=__a , callback=__a , callback_steps=__a , **__a , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
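# Usage sketch for this comparison pipeline (the custom_pipeline identifier
# below is an assumption, not confirmed by this file):
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   The aggregate method defined last above runs a prompt through all four
#   v1.x checkpoints and returns a StableDiffusionPipelineOutput whose
#   `images` list holds one image per checkpoint version.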
| 350 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : List[str] ):
torch.manual_seed(0 )
snake_case__ : Any = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
snake_case__ : int = PNDMScheduler(skip_prk_steps=__A )
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
snake_case__ : Union[str, Any] = CLIPTextModel(__A )
snake_case__ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case__ : str = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _lowercase ( self : List[Any] , __A : int , __A : Any=0 ):
snake_case__ : Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__A ) ).to(__A )
snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__A ) ).convert("RGB" )
if str(__A ).startswith("mps" ):
snake_case__ : List[Any] = torch.manual_seed(__A )
else:
snake_case__ : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
snake_case__ : Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : int ):
snake_case__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : int = self.get_dummy_components()
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : List[Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Tuple = self.get_dummy_inputs(__A )
snake_case__ : List[str] = sd_pipe(**__A ).images
snake_case__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : List[Any] = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : str = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = self.get_dummy_inputs(__A )
snake_case__ : List[Any] = "french fries"
snake_case__ : str = sd_pipe(**__A , negative_prompt=__A )
snake_case__ : Any = output.images
snake_case__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : Union[str, Any] = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Optional[int] ):
snake_case__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : List[str] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Any = self.get_dummy_inputs(__A )
snake_case__ : Tuple = [inputs["prompt"]] * 2
snake_case__ : Any = np.array(inputs["image"] ).astype(np.floataa ) / 2_5_5.0
snake_case__ : List[str] = torch.from_numpy(__A ).unsqueeze(0 ).to(__A )
snake_case__ : Union[str, Any] = image / 2 + 0.5
snake_case__ : str = image.permute(0 , 3 , 1 , 2 )
snake_case__ : int = image.repeat(2 , 1 , 1 , 1 )
snake_case__ : str = sd_pipe(**__A ).images
snake_case__ : Any = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
snake_case__ : int = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : int = self.get_dummy_components()
snake_case__ : Dict = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : str = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = self.get_dummy_inputs(__A )
snake_case__ : Optional[Any] = sd_pipe(**__A ).images
snake_case__ : Dict = image[0, -3:, -3:, -1]
snake_case__ : Union[str, Any] = [round(__A , 4 ) for x in image_slice.flatten().tolist()]
print(",".join([str(__A ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : str = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : List[str] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowercase ( self : List[Any] ):
snake_case__ : Tuple = self.get_dummy_components()
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : int = VaeImageProcessor(do_resize=__A , do_normalize=__A )
snake_case__ : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Dict = pipe(**self.get_dummy_inputs_by_type(__A , input_image_type="pt" ) )[0]
snake_case__ : int = components["vae"]
snake_case__ : Union[str, Any] = self.get_dummy_inputs_by_type(__A , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case__ : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case__ : str = pipe(**__A )[0]
snake_case__ : Dict = np.abs(out - out_latents_inputs ).max()
self.assertLess(__A , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : str , __A : Dict=0 ):
snake_case__ : Optional[int] = torch.manual_seed(__A )
snake_case__ : Tuple = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
snake_case__ : Optional[Any] = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : int ):
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Union[str, Any] = self.get_inputs()
snake_case__ : Union[str, Any] = pipe(**__A ).images
snake_case__ : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Any = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : str ):
snake_case__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
snake_case__ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : List[str] = self.get_inputs()
snake_case__ : Any = pipe(**__A ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Optional[Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : Dict ):
snake_case__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
snake_case__ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : int = self.get_inputs()
snake_case__ : Union[str, Any] = pipe(**__A ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Union[str, Any] = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : List[Any] ):
snake_case__ : Optional[Any] = 0
def callback_fn(__A : int , __A : int , __A : torch.FloatTensor ) -> None:
snake_case__ : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case__ : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : int = latents[0, -3:, -3:, -1]
snake_case__ : Optional[int] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
snake_case__ : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : Any = latents[0, -3:, -3:, -1]
snake_case__ : Dict = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
snake_case__ : Any = False
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
snake_case__ : int = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Optional[Any] = self.get_inputs()
pipe(**__A , callback=__A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _lowercase ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
snake_case__ : Tuple = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : Dict = self.get_inputs()
snake_case__ : List[Any] = pipe(**__A )
snake_case__ : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def _lowercase ( self : Tuple ):
snake_case__ : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : Union[str, Any] = inputs["image"].resize((5_0_4, 5_0_4) )
snake_case__ : Optional[Any] = "timbrooks/instruct-pix2pix"
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__A , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Union[str, Any] = pipe(**__A )
snake_case__ : Tuple = output.images[0]
snake_case__ : List[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
snake_case__ : int = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 286 | 0 |
def longest_common_substring(texta: str, textb: str) -> str:
    """Return the longest substring shared by texta and textb."""
    if not (isinstance(texta, str) and isinstance(textb, str)):
        raise ValueError('longest_common_substring() takes two strings for inputs')
    texta_length = len(texta)
    textb_length = len(textb)
    # dp[i][j] = length of the common suffix of texta[:i] and textb[:j]
    dp = [[0] * (textb_length + 1) for _ in range(texta_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, texta_length + 1):
        for j in range(1, textb_length + 1):
            if texta[i - 1] == textb[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
return texta[ans_index - ans_length : ans_index]
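# Example (hand-checked): longest_common_substring("abcdef", "xabded")
# returns "ab" -- the first maximal-length common substring found by the scan
# above, since ties never overwrite an earlier answer of equal length.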
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Optional[Any]=1_0 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : int=3_2 * 4 , __lowerCAmelCase : Dict=3_2 * 6 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : List[str]=3_2 , ):
"""simple docstring"""
_lowerCamelCase : List[str] = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Dict = is_training
_lowerCamelCase : str = use_auxiliary_loss
_lowerCamelCase : Any = num_queries
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : int = min_size
_lowerCamelCase : Any = max_size
_lowerCamelCase : int = num_labels
_lowerCamelCase : List[str] = mask_feature_size
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
_lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
_lowerCamelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = self.prepare_config_and_inputs()
_lowerCamelCase : List[str] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = output.encoder_hidden_states
_lowerCamelCase : Tuple = output.pixel_decoder_hidden_states
_lowerCamelCase : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_config.decoder_layers )
    def create_and_check_maskformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        """simple docstring"""
        with torch.no_grad():
            model = MaskFormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
            # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
            # encoder and pixel decoder
            self.parent.assertEqual(
                output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
            # let's ensure the other two hidden states exist
            self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(output.encoder_last_hidden_state is not None )
            if output_hidden_states:
                self.check_output_hidden_state(output , config )
    def create_and_check_maskformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        """simple docstring"""
        model = MaskFormerForInstanceSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
            comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MaskFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerConfig , has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_maskformer_model( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config , **inputs_dict , output_hidden_states=False )
    def test_maskformer_instance_segmentation_head_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
    def test_model_common_attributes( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
    def test_generate_without_input_ids( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
    def test_resize_tokens_embeddings( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def test_multi_gpu_data_parallel_forward( self ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_outputs_equivalence( self ):
"""simple docstring"""
pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_model_with_labels( self ):
        """simple docstring"""
        size = (self.model_tester.min_size,) * 2
        inputs = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=torch_device ),
            '''mask_labels''': torch.randn((2, 1_0, *size) , device=torch_device ),
            '''class_labels''': torch.zeros(2 , 1_0 , device=torch_device ).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config , **inputs_dict , output_hidden_states=True )
    def test_attention_outputs( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
    def test_training( self ):
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions( self ):
        """simple docstring"""
        model_class = self.all_model_classes[1]
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config )
        model.to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
TOLERANCE = 1E-4
def prepare_img( ):
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
            MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            if is_vision_available()
            else None
        )
    def test_inference_no_head( self ):
        """simple docstring"""
        model = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='''pt''' ).to(torch_device )
        inputs_shape = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_slice_hidden_state = torch.tensor(
            [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
    def test_inference_instance_segmentation_head( self ):
        """simple docstring"""
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(torch_device )
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='''pt''' ).to(torch_device )
        inputs_shape = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        expected_slice = [
            [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
            [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
            [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.6512E00, -5.2572E00, -3.3519E00],
                [3.6169E-02, -5.9025E00, -2.9313E00],
                [1.0766E-04, -7.7630E00, -5.1263E00],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_instance_segmentation_head_resnet_backbone( self ):
        """simple docstring"""
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
            .to(torch_device )
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='''pt''' ).to(torch_device )
        inputs_shape = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 8_0_0, 1_0_8_8) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        expected_slice = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_with_segmentation_maps_and_loss( self ):
        """simple docstring"""
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
            .to(torch_device )
            .eval()
        )
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.float32 ), np.zeros((3_8_4, 3_8_4) ).astype(np.float32 )] , return_tensors='''pt''' , )
        inputs['''pixel_values'''] = inputs['''pixel_values'''].to(torch_device )
        inputs['''mask_labels'''] = [el.to(torch_device ) for el in inputs['''mask_labels''']]
        inputs['''class_labels'''] = [el.to(torch_device ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
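The integration tests above stop at raw logits and loss values. As a complement, here is a minimal, hedged inference sketch showing how the image processor's documented post-processing turns MaskFormer outputs into a per-pixel segmentation map; the checkpoint name is taken from the tests above, everything else is a sketch rather than part of the test file.

# Hedged inference sketch (assumes transformers' documented
# `post_process_semantic_segmentation` helper; not part of the tests above).
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Collapse the per-query mask and class logits into one class id per pixel,
# resized back to the original image resolution (PIL size is (width, height)).
semantic_map = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(semantic_map.shape)  # (height, width) tensor of class ids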
| 72 | 0 |
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a positive integer whose only prime factors are 2, 3 and 5).

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]
    # indices of the next ugly number to multiply by 2, 3 and 5 respectively
    ia , ib , ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , n):
        next_num = min(next_a , next_b , next_c)
        ugly_nums.append(next_num)
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'{ugly_numbers(200) = }')
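A quick grounded sanity check for the function above: the sequence begins 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the tenth ugly number is 12.

# Sanity check for ugly_numbers (values verified by hand from the sequence above).
assert ugly_numbers(1) == 1
assert ugly_numbers(10) == 12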
| 363 |
lowercase__ : str = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowercase__ : Any = [{"type": "code", "content": INSTALL_CONTENT}]
lowercase__ : Any = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 180 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    '''simple docstring'''
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
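For the example sets above, the intersection {c, d, e} has 3 elements and the union has 8, so the printed score is 3/8. A minimal check:

# 3 shared elements / 8 elements in the union = 0.375
assert jaccard_similarity({'a', 'b', 'c', 'd', 'e'}, {'c', 'd', 'e', 'f', 'h', 'i'}) == 0.375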
| 295 |
"""simple docstring"""
def solution(n: int = 4_00_00_00) -> int:
    '''Return the sum of the even-valued Fibonacci terms that do not exceed n.'''
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 220 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('''nltk'''))
if NLTK_VERSION >= version.Version('''3.6.4'''):
from nltk import word_tokenize
_CITATION = '''\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
'''
_DESCRIPTION = '''\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
'''
_KWARGS_DESCRIPTION = '''
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
\'meteor\': meteor score.
Examples:
>>> meteor = datasets.load_metric(\'meteor\')
>>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
>>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results["meteor"], 4))
0.6944
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare( self , dl_manager ):
'''simple docstring'''
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
    def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        '''simple docstring'''
        if NLTK_VERSION >= version.Version("3.6.5" ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 354 |
def sylvester(number: int) -> int:
    assert isinstance(number , int ), f'The input value of [n={number}] is not an integer'
    if number == 1:
        return 2
    elif number < 1:
        msg = f'The input value of [n={number}] has to be > 0'
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
    print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 196 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 193 |
'''simple docstring'''
import random
class Onepad:
    @staticmethod
    def encrypt(text: str ) -> tuple[list[int], list[int]]:
        """Pair every character with a fresh random key k and store (ord(c) + k) * k."""
        plain = [ord(i ) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1 , 3_00 )
            cipher.append((i + k) * k )
            key.append(k )
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int] , key: list[int] ) -> str:
        """Invert (ord(c) + k) * k for each stored key k."""
        plain = []
        for i in range(len(key ) ):
            p = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(p ) )
        return "".join(plain )
if __name__ == "__main__":
    c , k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
| 304 | 0 |
def valid_connection(graph: list[list[int]] , next_ver: int , curr_ind: int , path: list[int] ) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path )
def util_hamilton_cycle(graph: list[list[int]] , path: list[int] , curr_ind: int ) -> bool:
    # Base Case
    if curr_ind == len(graph ):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0 , len(graph ) ):
        if valid_connection(graph , next_ver , curr_ind , path ):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph , path , curr_ind + 1 ):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph: list[list[int]] , start_index: int = 0 ) -> list[int]:
    path = [-1] * (len(graph ) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # return the path if a Hamiltonian cycle exists, otherwise return an empty list
    return path if util_hamilton_cycle(graph , path , 1 ) else []
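A small usage sketch for hamilton_cycle above; the adjacency matrix is my own example, not from the source. Vertices 0-1-2-4-3-0 form a Hamiltonian cycle, and tracing the backtracking order shows it is exactly the path found.

# A 5-vertex graph with Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0.
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]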
| 50 |
import base64
def base64_encode(string: str ) -> bytes:
    return base64.b64encode(string.encode('utf-8' ) )
def base64_decode(encoded: bytes ) -> str:
    return base64.b64decode(encoded ).decode('utf-8' )
if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
| 50 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int , seed: int = 2 , step: int = 1 , attempts: int = 3 , ):
    """Return a nontrivial factor of ``num`` found by Pollard's rho, or ``None`` if all attempts fail."""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int , step: int , modulus: int ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'num',
        type=int,
        help='The value to find a divisor of',
    )
    parser.add_argument(
        '--attempts',
        type=int,
        default=3,
        help='The number of attempts before giving up',
    )
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
        quotient = args.num // divisor
        print(F"{args.num} = {divisor} * {quotient}")
| 286 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self , snake_case_ = 7_6_8 , ):
"""simple docstring"""
super().__init__()
A_ : Optional[int] = nn.Parameter(torch.zeros(1 , snake_case_ ) )
A_ : Optional[int] = nn.Parameter(torch.ones(1 , snake_case_ ) )
    def to( self , torch_device = None , torch_dtype = None , ):
        """simple docstring"""
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale( self , embeds ):
        """simple docstring"""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale( self , embeds ):
        """simple docstring"""
        embeds = (embeds * self.std) + self.mean
        return embeds
| 286 | 1 |
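A minimal, hedged usage sketch for the normalizer class above (the class and method names follow the diffusers source this appears to mirror): scale() whitens an embedding batch and unscale() is its exact inverse, so a round trip reproduces the input.

# Round-trip check: unscale(scale(x)) == x up to floating-point error.
import torch

normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
embeds = torch.randn(4, 768)
roundtrip = normalizer.unscale(normalizer.scale(embeds))
print(torch.allclose(roundtrip, embeds, atol=1e-6))  # True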
"""simple docstring"""
def interpolation_search(sorted_collection , item ):
    '''simple docstring'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection , item , left , right ):
    '''simple docstring'''
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , left )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted(collection ):
    '''simple docstring'''
    if collection != sorted(collection ):
        raise ValueError("Collection must be ascending sorted" )
    return True
if __name__ == "__main__":
    import sys
    collection = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 6_7
    result = interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at positions: {result}''')
else:
print("Not found")
| 364 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module ):
    '''simple docstring'''
    if is_torch_version("<" , "2.0.0" ) or not hasattr(torch , "_dynamo" ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel(model , keep_fp32_wrapper = True ):
    '''simple docstring'''
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model , "forward" )
        original_forward = model.__dict__.pop("_original_forward" , None )
        if original_forward is not None:
            while hasattr(forward , "__wrapped__" ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , "_converted_to_transformer_engine" , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    '''simple docstring'''
    PartialState().wait_for_everyone()
def save(obj , f ):
    '''simple docstring'''
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment(**kwargs ):
    '''simple docstring'''
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj ):
    '''simple docstring'''
    if not hasattr(obj , "__qualname__" ) and not hasattr(obj , "__name__" ):
        obj = getattr(obj , "__class__" , obj )
    if hasattr(obj , "__qualname__" ):
        return obj.__qualname__
    if hasattr(obj , "__name__" ):
        return obj.__name__
    return str(obj )
def merge_dicts(source , destination ):
    '''simple docstring'''
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use(port = None ):
    '''simple docstring'''
    if port is None:
        port = 2_9500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(("localhost", port) ) == 0
| 259 | 0 |
"""simple docstring"""
import base64
def base85_encode(string: str ) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode("""utf-8""" ) )
def base85_decode(a85encoded: bytes ) -> str:
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode("""utf-8""" )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 46 |
import random
from typing import Any
def fisher_yates_shuffle(data: list ) -> list[Any]:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1)
        b = random.randint(0 , len(data ) - 1)
        data[a] , data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
    print('Fisher-Yates Shuffle:')
    print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 180 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 352 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__( self , parent , vocab_size=99 , batch_size=13 , encoder_seq_length=7 , decoder_seq_length=9 , is_training=True , use_attention_mask=True , use_labels=False , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , d_ff=37 , relative_attention_num_buckets=8 , dropout_rate=0.1 , initializer_factor=0.002 , eos_token_id=1 , pad_token_id=0 , decoder_start_token_id=0 , scope=None , decoder_layers=None , ) -> Tuple:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config( self ) -> List[str]:
        """simple docstring"""
        return TaConfig.from_pretrained('google/umt5-base' )
    def prepare_inputs_dict( self , config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> List[str]:
        """simple docstring"""
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=torch_device )
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=torch_device )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config , input_ids , decoder_input_ids )
        return config, input_dict
    def prepare_config_and_inputs_for_common( self ) -> Any:
        """simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config( self ) -> Dict:
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config( self ) -> List[Any]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ) -> str:
        """simple docstring"""
        model = UMTaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids=input_ids , decoder_input_ids=decoder_input_ids , attention_mask=attention_mask , decoder_attention_mask=decoder_attention_mask , )
        result = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids )
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def create_and_check_decoder_model_past( self , config , input_ids , decoder_input_ids , attention_mask , decoder_attention_mask , lm_labels , ) -> Optional[int]:
        """simple docstring"""
        model = UMTaModel(config=config ).get_decoder().to(torch_device ).eval()
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['last_hidden_state']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def create_and_check_model_fpaa_forward( self , config , input_dict , ) -> List[str]:
        """simple docstring"""
        model = UMTaModel(config=config ).to(torch_device ).half().eval()
        output = model(**input_dict )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(output ).any().item() )
@require_torch
class UMTaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp( self ) -> List[str]:
        """simple docstring"""
        self.model_tester = UMTaModelTester(self )
@unittest.skip('Test has a segmentation fault on torch 1.8.0' )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_SCREAMING_SNAKE_CASE , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=_SCREAMING_SNAKE_CASE , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs[0]
_UpperCAmelCase = UMTaForConditionalGeneration(_SCREAMING_SNAKE_CASE ).eval()
model.to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {
'head_mask': torch.zeros(config.num_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE ),
'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE ),
'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE ),
}
for attn_name, (name, mask) in zip(_SCREAMING_SNAKE_CASE , head_masking.items() ):
_UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model.generate(
config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=_SCREAMING_SNAKE_CASE , return_dict_in_generate=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __a ( unittest.TestCase ):
@slow
@unittest.skip(
'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def test_small_integration_test( self ) -> List[str]:
        """simple docstring"""
        model = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=False , legacy=False )
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text , return_tensors='pt' , padding=True ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
            [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
        generated_ids = model.generate(input_ids.to(torch_device ) )
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids )
        self.assertEqual(filling , EXPECTED_FILLING )
| 185 | 0 |
import argparse
import os
import re
import packaging.version
lowercase__ :Dict = "examples/"
lowercase__ :List[Any] = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
lowercase__ :List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
lowercase__ :str = "README.md"
def update_version_in_file(fname , version , pattern ):
    '''simple docstring'''
    with open(fname , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('''VERSION''' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.write(code )
def update_version_in_examples(version ):
    '''simple docstring'''
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('''research_projects''' )
        if "legacy" in directories:
            directories.remove('''legacy''' )
        for fname in fnames:
            if fname.endswith('''.py''' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='''examples''' )
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
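
# A hedged usage note (the script path is assumed; the flags are exactly the ones defined by
# the argparse block above):
#
#   python utils/release.py                 # pre-release work: bump the version everywhere
#   python utils/release.py --patch         # pre-release work for a patch release
#   python utils/release.py --post_release  # post-release work: move back to a .dev0 version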
| 101 |
import unittest
from knapsack import knapsack as k
class Test(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 196 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretching interval used during L0 training
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )

    args = parser.parse_args()

    main(args)
| 359 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the number of set bits in a non-negative integer."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        # Each iteration clears the least significant set bit.
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the number of set bits in a non-negative integer."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
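
# A short worked example (pure illustration): Brian Kernighan's trick clears one set bit per
# iteration, so 0b1011 takes exactly three steps:
#   0b1011 & 0b1010 -> 0b1010
#   0b1010 & 0b1001 -> 0b1000
#   0b1000 & 0b0111 -> 0b0000   => 3 set bits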
def benchmark() -> None:
    """Benchmark code comparing the two counting functions."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 325 | 0 |
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """The n-th Catalan number counts the binary search trees on n nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Number of labeled binary trees: Catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
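
# A small sanity check (illustrative values): for node_count = 5, catalan_number(5) == 42
# binary search trees, and binary_tree_count(5) == 42 * 5! == 5040 labeled binary trees.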
| 50 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(F"""{solution() = }""")
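
# Sanity check taken from the Project Euler 115 statement: with a minimum block length of 3,
# F(3, 30) = 1,089,155 is the first fill-count over one million, so solution(3) == 30.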
| 50 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
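
# A minimal usage sketch. "openai/clip-vit-base-patch32" is a public checkpoint; `image` (a PIL
# image) and the downstream model are assumed for illustration only:
#
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
#   # inputs contains "input_ids", "attention_mask" and "pixel_values"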
| 367 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes=None,
    font_path=None,
):
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result

    def normalize(self, image: np.ndarray, data_format=None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean and std across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
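
# A hedged usage sketch (the input `image` is assumed; in practice this processor is usually
# wrapped inside a Pix2StructProcessor shipped with a checkpoint):
#
#   image_processor = Pix2StructImageProcessor(max_patches=1024)
#   features = image_processor(images=image, return_tensors="pt")
#   # features["flattened_patches"]: (1, max_patches, 2 + 16 * 16 * 3); features["attention_mask"]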
| 208 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


# NOTE: these wrappers intentionally shadow the built-ins; they are only used inside this benchmark.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter() | 297 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
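
# Chain example from the Project Euler 74 statement: digit_factorial_sum(69) == 6! + 9! == 363600,
# and 69 -> 363600 -> 1454 -> 169 -> 363601 is a chain of five non-repeating terms, since the
# next term (1454) has already appeared.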
| 259 | 0 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
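
# Note (assumption made explicit): oe_process runs exactly range(0, 10) swap phases, so this
# parallel odd-even transposition only guarantees a fully sorted result for lists of length
# <= 10, which matches the 10-element input used in main().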
| 357 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exists, we use it for backward compatibility.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 66 | 0 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" ,[
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ ,i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 ,4 ), range(4 ,7 ), range(7 ,10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 ,1 ), range(1 ,2 ), range(2 ,3 )]),
] ,)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" ,[
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] ,)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" ,[
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] ,)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 349 |
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
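
# A quick round-trip check (the expected output matches Python's built-in base64 module):
#   base64_encode(b"Hello World!")     == b"SGVsbG8gV29ybGQh"
#   base64_decode("SGVsbG8gV29ybGQh")  == b"Hello World!"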
| 185 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 57 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 57 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 67 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 325 | 0 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _lowerCAmelCase ( lowercase , lowercase ) -> Tuple:
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> str:
__lowerCAmelCase = tmp_path / """cache"""
__lowerCAmelCase = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCAmelCase = TextDatasetReader(lowercase , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_text_dataset(lowercase , lowercase )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> str:
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # The text loader exposes a single "text" column, whose default dtype is "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
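# Hedged usage sketch for the reader under test (the corpus path is
# illustrative, not part of the original test file):
# from datasets.io.text import TextDatasetReader
# ds = TextDatasetReader("my_corpus.txt", cache_dir="/tmp/cache").read()
# assert ds.column_names == ["text"]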
| 358 |
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    # Numerically stable softmax over the last axis
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 46 | 0 |
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
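# Hedged example invocation (script name and values are illustrative; the
# flags mirror the InitializationArguments fields consumed above):
# python initialize_model.py --tokenizer_name codeparrot/codeparrot \
#     --config_name gpt2-large --model_name my-codeparrot-init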
| 56 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number - 1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
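# Hedged usage sketch (the argument name and version below are illustrative):
# def step(sample=None, **kwargs):
#     # Pops the legacy kwarg with a FutureWarning and returns its value,
#     # or None if the caller did not pass it.
#     old_value = deprecate("legacy_arg", "1.0.0", "Pass `sample` instead.", take_from=kwargs)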
| 208 | 0 |
import os


def solution(filename: str = "input.txt") -> int:
    """
    Return the minimal path sum from the left column to the right column of the
    matrix stored in `filename`, moving up, down and right (Project Euler 82).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # First assume a straight move from the previous column...
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # ...then relax downward and upward moves within the column.
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
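# Worked check (illustrative): for the matrix [[1, 2], [4, 1]] the DP above
# yields column sums [3, 5], relaxes them to [3, 4], and returns min = 3,
# matching the cheapest left-to-right path 1 -> 2.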
| 368 |
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute the attention entropy and the head importance scores, based on gradients of the LM loss."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Mask the least important heads iteratively until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune the heads selected by the mask and compare speed/score against masking."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )

    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )

    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")

    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")

    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
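# Hedged example invocation (script name and paths are illustrative; --data_dir
# expects a text file of whitespace-separated token ids loadable by np.loadtxt):
# python run_head_pruning.py --data_dir ids.txt --model_name_or_path gpt2 \
#     --output_dir ./pruned --try_masking --masking_threshold 0.9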
| 120 | 0 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
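# Sanity checks (values computed from the definitions above):
# logical_left_shift(1, 1)       -> "0b10"
# logical_right_shift(8, 2)      -> "0b10"
# arithmetic_right_shift(-8, 2)  -> "0b11110"  (the sign bit is replicated on the left)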
| 276 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Optional[int] , snake_case: Any , snake_case: Optional[Any]=13 , snake_case: Tuple=32 , snake_case: Optional[int]=2 , snake_case: Tuple=3 , snake_case: Tuple=16 , snake_case: Optional[Any]=[1, 2, 1] , snake_case: Optional[int]=[2, 2, 4] , snake_case: Optional[int]=2 , snake_case: int=2.0 , snake_case: Union[str, Any]=True , snake_case: List[str]=0.0 , snake_case: List[Any]=0.0 , snake_case: Optional[Any]=0.1 , snake_case: List[Any]="gelu" , snake_case: Optional[int]=False , snake_case: Union[str, Any]=True , snake_case: Union[str, Any]=0.0_2 , snake_case: Optional[int]=1E-5 , snake_case: Optional[Any]=True , snake_case: List[Any]=None , snake_case: List[Any]=True , snake_case: Optional[Any]=10 , snake_case: str=8 , ) -> Tuple:
snake_case_ :Dict = parent
snake_case_ :Any = batch_size
snake_case_ :List[Any] = image_size
snake_case_ :List[Any] = patch_size
snake_case_ :int = num_channels
snake_case_ :Tuple = embed_dim
snake_case_ :str = depths
snake_case_ :str = num_heads
snake_case_ :Optional[int] = window_size
snake_case_ :Tuple = mlp_ratio
snake_case_ :Any = qkv_bias
snake_case_ :List[Any] = hidden_dropout_prob
snake_case_ :Optional[Any] = attention_probs_dropout_prob
snake_case_ :Union[str, Any] = drop_path_rate
snake_case_ :Any = hidden_act
snake_case_ :Optional[Any] = use_absolute_embeddings
snake_case_ :Union[str, Any] = patch_norm
snake_case_ :Dict = layer_norm_eps
snake_case_ :str = initializer_range
snake_case_ :Tuple = is_training
snake_case_ :Tuple = scope
snake_case_ :Union[str, Any] = use_labels
snake_case_ :Optional[Any] = type_sequence_label_size
snake_case_ :Dict = encoder_stride
def lowerCAmelCase_ ( self: int ) -> int:
snake_case_ :List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ :Any = None
if self.use_labels:
snake_case_ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ :int = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self: str ) -> Union[str, Any]:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase_ ( self: str , snake_case: Optional[int] , snake_case: Dict , snake_case: str ) -> List[Any]:
snake_case_ :Union[str, Any] = SwinvaModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Optional[int] = model(snake_case )
snake_case_ :Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
snake_case_ :int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: Tuple , snake_case: int ) -> Any:
snake_case_ :Dict = SwinvaForMaskedImageModeling(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Tuple = model(snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ :List[Any] = 1
snake_case_ :int = SwinvaForMaskedImageModeling(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ :int = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase_ ( self: List[Any] , snake_case: Any , snake_case: List[str] , snake_case: Union[str, Any] ) -> Tuple:
snake_case_ :int = self.type_sequence_label_size
snake_case_ :List[Any] = SwinvaForImageClassification(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Dict = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ ( self: int ) -> str:
snake_case_ :Any = self.prepare_config_and_inputs()
snake_case_, snake_case_, snake_case_ :List[str] = config_and_inputs
snake_case_ :List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_A : Any = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
_A : List[Any] = False
_A : List[str] = False
_A : Tuple = False
_A : List[str] = False
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
snake_case_ :Optional[int] = SwinvaModelTester(self )
snake_case_ :List[str] = ConfigTester(self , config_class=snake_case , embed_dim=37 )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple:
snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> str:
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
def lowerCAmelCase_ ( self: int ) -> Dict:
pass
def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]:
snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[int] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ :List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def lowerCAmelCase_ ( self: Dict ) -> Optional[int]:
snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Optional[int] = model_class(snake_case )
snake_case_ :List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ :int = [*signature.parameters.keys()]
snake_case_ :List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :List[str] = True
for model_class in self.all_model_classes:
snake_case_ :List[Any] = True
snake_case_ :Any = False
snake_case_ :Optional[int] = True
snake_case_ :Tuple = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Any = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :str = outputs.attentions
snake_case_ :Dict = len(self.model_tester.depths )
self.assertEqual(len(snake_case ) , snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ :Union[str, Any] = True
snake_case_ :Tuple = config.window_size**2
snake_case_ :Any = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :int = outputs.attentions
self.assertEqual(len(snake_case ) , snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
snake_case_ :Any = len(snake_case )
# Check attention is always last and order is fine
snake_case_ :int = True
snake_case_ :Dict = True
snake_case_ :Optional[int] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Dict = model(**self._prepare_for_class(snake_case , snake_case ) )
if hasattr(self.model_tester , """num_hidden_states_types""" ):
snake_case_ :Any = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
snake_case_ :int = 2
self.assertEqual(out_len + added_hidden_states , len(snake_case ) )
snake_case_ :str = outputs.attentions
self.assertEqual(len(snake_case ) , snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def lowerCAmelCase_ ( self: int , snake_case: Dict , snake_case: Dict , snake_case: Optional[Any] , snake_case: Dict ) -> List[str]:
snake_case_ :Dict = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
snake_case_ :Optional[int] = model(**self._prepare_for_class(snake_case , snake_case ) )
snake_case_ :str = outputs.hidden_states
snake_case_ :List[Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case ) , snake_case )
# Swinv2 has a different seq_length
snake_case_ :List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
snake_case_ :str = outputs.reshaped_hidden_states
self.assertEqual(len(snake_case ) , snake_case )
snake_case_, snake_case_, snake_case_, snake_case_ :Any = reshaped_hidden_states[0].shape
snake_case_ :int = (
reshaped_hidden_states[0].view(snake_case , snake_case , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase_ ( self: Any ) -> Any:
snake_case_, snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
snake_case_ :Union[str, Any] = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :List[str] = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , snake_case )
def lowerCAmelCase_ ( self: Tuple ) -> Any:
snake_case_, snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Optional[int] = 3
snake_case_ :Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
snake_case_ :str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
snake_case_ :Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
snake_case_ :int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
snake_case_ :str = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :Tuple = True
self.check_hidden_states_output(snake_case , snake_case , snake_case , (padded_height, padded_width) )
def lowerCAmelCase_ ( self: Any ) -> Tuple:
snake_case_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case )
def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
snake_case_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@slow
def lowerCAmelCase_ ( self: List[Any] ) -> Dict:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ :List[str] = SwinvaModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]:
snake_case_, snake_case_ :str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ :Optional[int] = _config_zero_init(snake_case )
for model_class in self.all_model_classes:
snake_case_ :Tuple = model_class(config=snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self: Optional[int] ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase_ ( self: List[str] ) -> List[str]:
snake_case_ :Tuple = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
snake_case )
snake_case_ :str = self.default_image_processor
snake_case_ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
snake_case_ :str = image_processor(images=snake_case , return_tensors="""pt""" ).to(snake_case )
# forward pass
with torch.no_grad():
snake_case_ :Tuple = model(**snake_case )
# verify the logits
snake_case_ :Dict = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case )
snake_case_ :int = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
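# Hedged note: to run only this file locally, a pytest invocation along these
# lines (the path is illustrative) is the usual workflow:
# python -m pytest tests/models/swinv2/test_modeling_swinv2.py -rA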
| 66 | 0 |
"""simple docstring"""
lowercase__ : Any = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
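# Minimal lookup sketch (illustrative): the table maps a bare package name to
# its version pin, e.g. deps["numpy"] -> "numpy>=1.17".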
| 352 |
"""simple docstring"""
import os
def __lowercase ( _a ):
snake_case_ : Tuple = len(grid[0] )
snake_case_ : Optional[int] = len(_a )
snake_case_ : Union[str, Any] = 0
snake_case_ : Union[str, Any] = 0
snake_case_ : List[Any] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_a ):
for j in range(n_rows - 3 ):
snake_case_ : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
snake_case_ : int = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
snake_case_ : Dict = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
snake_case_ : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
snake_case_ : List[str] = max(
_a , _a , _a , _a )
if max_product > largest:
snake_case_ : str = max_product
return largest
def __lowercase ( ):
snake_case_ : Tuple = []
with open(os.path.dirname(_a ) + '''/grid.txt''' ) as file:
for line in file:
grid.append(line.strip('''\n''' ).split(''' ''' ) )
snake_case_ : List[str] = [[int(_a ) for i in grid[j]] for j in range(len(_a ) )]
return largest_product(_a )
if __name__ == "__main__":
print(solution())
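# Sanity sketch (illustrative): in a 4x4 grid filled with 2s, every line of
# four multiplies to 16, so largest_product returns 16.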
| 155 | 0 |
"""simple docstring"""
# Imports
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a=None , __a=None , __a=None , __a=None , __a=None ):
self.set_matricies(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a )
def snake_case ( self , __a=None , __a=None , __a=None , __a=None , __a=None ):
if red is not None:
__lowerCAmelCase = red
if green is not None:
__lowerCAmelCase = green
if blue is not None:
__lowerCAmelCase = blue
if red_edge is not None:
__lowerCAmelCase = red_edge
if nir is not None:
__lowerCAmelCase = nir
return True
def snake_case ( self , __a="" , __a=None , __a=None , __a=None , __a=None , __a=None ):
self.set_matricies(red=__a , green=__a , blue=__a , red_edge=__a , nir=__a )
__lowerCAmelCase = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def snake_case ( self ):
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def snake_case ( self ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def snake_case ( self ):
return self.nir * (self.red / (self.green**2))
def snake_case ( self ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def snake_case ( self ):
return (self.nir - self.red) / (self.nir + self.red)
def snake_case ( self ):
return (self.nir - self.blue) / (self.nir + self.blue)
def snake_case ( self ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def snake_case ( self ):
return (self.nir - self.green) / (self.nir + self.green)
def snake_case ( self ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def snake_case ( self ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def snake_case ( self ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def snake_case ( self ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def snake_case ( self , __a=0.0_8 , __a=1.2_2 , __a=0.0_3 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def snake_case ( self ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def snake_case ( self ):
return (self.nir / self.green) - 1
def snake_case ( self ):
return (self.nir / self.redEdge) - 1
def snake_case ( self ):
return (self.red - self.blue) / self.red
def snake_case ( self ):
__lowerCAmelCase = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def snake_case ( self ):
return self.nir - self.green
def snake_case ( self ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def snake_case ( self ):
__lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def snake_case ( self , __a=0.1_6 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def snake_case ( self , __a=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def snake_case ( self ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def snake_case ( self , __a=None , __a=None ):
return (self.nir - b) / (a * self.red)
def snake_case ( self ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def snake_case ( self ):
return (self.red + self.green + self.blue) / 3_0.5
def snake_case ( self ):
return self.nir / self.red
def snake_case ( self ):
return (self.rvi() - 1) / (self.rvi() + 1)
def snake_case ( self ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def snake_case ( self ):
return self.green / (self.nir + self.red + self.green)
def snake_case ( self ):
return self.nir / (self.nir + self.red + self.green)
def snake_case ( self ):
return self.red / (self.nir + self.red + self.green)
def snake_case ( self ):
return (self.green - self.red) / (self.green + self.red)
def snake_case ( self ):
return (self.red - self.green) / (self.red + self.green)
def snake_case ( self ):
__lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
__lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def snake_case ( self ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def snake_case ( self ):
return self.nir / self.red
def snake_case ( self ):
return (self.ndvi() + 0.5) ** (1 / 2)
def snake_case ( self ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
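# Hedged usage sketch (the band arrays are illustrative single-pixel rasters):
# bands = IndexCalculation(red=np.array([[50.0]]), nir=np.array([[100.0]]))
# bands.calculation("NDVI")  # -> array([[0.33333333]]), i.e. (100 - 50) / (100 + 50)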
| 57 |
"""simple docstring"""
import numpy
# List of input, output pairs
A : Any = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
A : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
A : Union[str, Any] = [2, 4, 1, 5]
A : int = len(train_data)
A : Dict = 0.009
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase="train" ):
'''simple docstring'''
return calculate_hypothesis_value(_UpperCamelCase , _UpperCamelCase ) - output(
_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
for i in range(len(_UpperCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=m ):
'''simple docstring'''
__lowerCAmelCase = 0
for i in range(_UpperCamelCase ):
if index == -1:
summation_value += _error(_UpperCamelCase )
else:
summation_value += _error(_UpperCamelCase ) * train_data[i][0][index]
return summation_value
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = summation_of_cost_derivative(_UpperCamelCase , _UpperCamelCase ) / m
return cost_derivative_value
def _lowerCamelCase ( ):
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__lowerCAmelCase = 0.00_00_02
__lowerCAmelCase = 0
__lowerCAmelCase = 0
while True:
j += 1
__lowerCAmelCase = [0, 0, 0, 0]
for i in range(0 , len(_UpperCamelCase ) ):
__lowerCAmelCase = get_cost_derivative(i - 1 )
__lowerCAmelCase = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_UpperCamelCase , _UpperCamelCase , atol=_UpperCamelCase , rtol=_UpperCamelCase , ):
break
__lowerCAmelCase = temp_parameter_vector
print(("Number of iterations:", j) )
def _lowerCamelCase ( ):
'''simple docstring'''
for i in range(len(_UpperCamelCase ) ):
print(("Actual output value:", output(_UpperCamelCase , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(_UpperCamelCase , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
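# Worked check (illustrative): with the initial parameter_vector [2, 4, 1, 5],
# the hypothesis for train input (5, 2, 3) is 2 + 4*5 + 1*2 + 5*3 = 39, so the
# initial error against its target of 15 is 24.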
| 57 | 1 |
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Return the denominator of the product of the digit-cancelling fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
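# The four non-trivial curious fractions are 16/64, 19/95, 26/65 and 49/98;
# their product simplifies to 1/100, so solution() returns 100.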
| 224 |
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
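# Numeric check (illustrative): at v = c/10, beta(29979245.8) == 0.1 and
# gamma(29979245.8) = 1/sqrt(1 - 0.01) ≈ 1.00504.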
| 224 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
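# Hedged usage sketch: the attribute_map above aliases common config names, so
# Speech2Text2Config(decoder_attention_heads=8).num_attention_heads == 8, and
# config.hidden_size resolves to d_model (256 by default).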
| 251 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
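# Hedged sketch of how the patching tests consume this module (the mock and
# target names are illustrative):
# from datasets.utils.patching import patch_submodule
# import _test_patching
# with patch_submodule(_test_patching, "os.path.join", lambda *p: "/".join(p)):
#     ...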
| 46 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Any = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
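# With the lazy structure above, e.g. `from transformers.models.convbert import
# ConvBertModel` defers the heavy torch import until the attribute is accessed.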
| 268 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def _lowerCamelCase( a ):
__a = torch.exp(a )
__a = torch.sum(a , dim=1 ) # sum of exp(x_i)
__a = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(a ) - B / A
class snake_case__ ( nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config ) for _ in range(config.num_hidden_layers )] )
        self.highway = nn.ModuleList([BertHighway(config ) for _ in range(config.num_hidden_layers )] )
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers )]
    def set_early_exit_entropy( self , x ):
        if (type(x ) is float) or (type(x ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler( self , pooler ):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    def forward( self , hidden_states , attention_mask=None , head_mask=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states , attention_mask , head_mask[i] , encoder_hidden_states , encoder_attention_mask )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)
            highway_exit = self.highway[i](current_outputs )
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits )
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output , i + 1 )
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
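# A minimal usage sketch (hypothetical caller, with assumed shapes): during inference
# the encoder raises HighwayException once a highway head's entropy drops below its
# per-layer threshold, and the caller recovers the early-exit outputs:
#
#   try:
#       outputs = encoder(hidden_states, attention_mask=extended_mask, head_mask=head_mask)
#   except HighwayException as e:
#       outputs = e.message        # outputs assembled at the exit layer
#       exit_layer = e.exit_layer  # 1-indexed layer at which inference stopped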
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """, BERT_START_DOCSTRING, )
class DeeBertModel( BertPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embeddings = BertEmbeddings(config )
        self.encoder = DeeBertEncoder(config )
        self.pooler = BertPooler(config )
        self.init_weights()
    def init_highway_pooler( self ):
        self.encoder.init_highway_pooler(self.pooler )
    def get_input_embeddings( self ):
        return self.embeddings.word_embeddings
    def set_input_embeddings( self , value ):
        self.embeddings.word_embeddings = value
    def _prune_heads( self , heads_to_prune ):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds" )
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape , device=device )
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape , device=device )
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask , input_shape , device )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1_0000.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )
        embedding_output = self.embeddings(
            input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
        encoder_outputs = self.encoder(
            embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output )
        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException( Exception ):
    def __init__( self , message , exit_layer ):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway( nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )
    def forward( self , encoder_outputs ):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """, BERT_START_DOCSTRING, )
class DeeBertForSequenceClassification( BertPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 268 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["""ChineseCLIPFeatureExtractor"""]
    _import_structure["image_processing_chinese_clip"] = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor( ProcessorMixin ):
"""simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__( self : int , image_processor : List[str]=None , tokenizer : Union[str, Any]=None , **kwargs : str ) -> Tuple:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self : List[Any] , *args : str , **kwargs : Tuple ) -> Union[str, Any]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        images = kwargs.pop("""images""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self : str , *args : Union[str, Any] , **kwargs : Union[str, Any] ) -> List[str]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : List[Any] , *args : Any , **kwargs : Union[str, Any] ) -> List[str]:
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self : str ) -> Union[str, Any]:
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your images inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson( self : Dict , tokens : List[str] , is_inner_value : str=False , added_vocab : List[Any]=None ) -> Optional[int]:
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(R"""<s_(.*?)>""" , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(RF'</s_{key}>' , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , """""" )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R"""<sep/>""" ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
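    # A small illustration of `tokenajson` (input taken from the DonutProcessor test
    # further down in this file):
    #
    #   processor.tokenajson("<s_name>John Doe</s_name><s_age>99</s_age>")
    #   # -> {"name": "John Doe", "age": "99"}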
    @property
    def feature_extractor_class( self : Dict ) -> int:
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self : Dict ) -> Optional[Any]:
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
| 120 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        processor = BlipaProcessor(image_processor , tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
    def test_tokenizer_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] ) | 357 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = '''docs/source/en/_toctree.yml'''
def clean_doc_toc( doc_list ) -> list:
    '''simple docstring'''
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'''{duplicate_key} is present several times in the documentation table of content at '''
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
    new_doc = sorted(new_doc , key=lambda s: s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError(f'''{doc_list} has two \'overview\' docs which is not allowed.''' )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
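# A small illustration (hypothetical input): duplicates are merged, entries are
# sorted by title, and the "overview" entry is kept first:
#
#   clean_doc_toc([
#       {"local": "overview", "title": "Overview"},
#       {"local": "b", "title": "Beta"},
#       {"local": "a", "title": "Alpha"},
#   ])
#   # -> [{"local": "overview", "title": "Overview"},
#   #     {"local": "a", "title": "Alpha"}, {"local": "b", "title": "Beta"}]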
def check_scheduler_doc( overwrite=False ) -> Any:
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['''sections''']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['''sections'''] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def check_pipeline_doc( overwrite=False ) -> Optional[Any]:
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['''sections''']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['''section''']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['''section'''] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['''sections'''] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__snake_case = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite) | 219 | 0 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims( tree : Union[dict, list, tuple, torch.Tensor]):
    shapes = []
    if isinstance(tree , dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree , (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree , torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
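# e.g. _fetch_dims({"a": torch.zeros(2, 3), "b": [torch.zeros(4)]})
# returns [torch.Size([2, 3]), torch.Size([4])]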
@torch.jit.ignore
def _flat_idx_to_idx( flat_idx : int , dims : Tuple[int, ...]):
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
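# e.g. _flat_idx_to_idx(5, (2, 3)) == (1, 2), since 5 == 1 * 3 + 2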
@torch.jit.ignore
def _get_minimal_slice_set( start : Sequence[int] , end : Sequence[int] , dims : Sequence[int] , start_edges : Optional[Sequence[bool]] = None , end_edges : Optional[Sequence[bool]] = None , ):
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l : List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end , dims)]
        reduce_edge_list(end_edges)
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0] , end[0] + 1),)]
    slices : List[Tuple[slice, ...]] = []
    path_list : List[slice] = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start , end):
        if s == e:
            path_list.append(slice(s , s + 1))
        else:
            break
    path : Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)
    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]
    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi , sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ))
    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi , edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ))
    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx]),))
        slices.extend(lower())
    return slices
@torch.jit.ignore
def _chunk_slice( t : torch.Tensor , flat_start : int , flat_end : int , no_batch_dims : int ):
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer( layer : Callable , inputs : Dict[str, Any] , chunk_size : int , no_batch_dims : int , low_mem : bool = False , _out : Any = None , _add_into_out : bool = False , ):
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])
    def _prep_inputs(t : torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1 , *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t
    prepped_inputs : Dict[str, Any] = tensor_tree_map(_prep_inputs , inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:])) , _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t : torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size) , no_batch_dims=len(orig_batch_dims) , )
        chunks : Dict[str, Any] = tensor_tree_map(select_chunk , prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:]) , output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict):
            def assign(da : dict , db : dict) -> None:
                # Recursively copy each leaf tensor of the chunk into its slot
                for k, v in da.items():
                    if isinstance(v , dict):
                        assign(v , db[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += db[k]
                        else:
                            v[i : i + chunk_size] = db[k]
            assign(out , output_chunk)
        elif isinstance(output_chunk , tuple):
            for xa, xb in zip(out , output_chunk):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk , torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")
        i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:]) , out)
    return out
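# A minimal usage sketch (hypothetical layer and shapes, not from the original file):
# running a layer over a 2 x 2 batch in flattened chunks of two examples at a time:
#
#   layer = lambda x: {"y": x + 1}
#   out = chunk_layer(layer, {"x": torch.zeros(2, 2, 8)}, chunk_size=2, no_batch_dims=2)
#   out["y"].shape  # torch.Size([2, 2, 8])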
class ChunkSizeTuner:
    def __init__( self : Optional[Any] , max_chunk_size : int = 5_12 , ) -> None:
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size : Optional[int] = None
        self.cached_arg_data : Optional[tuple] = None
    def _determine_favorable_chunk_size( self : Tuple , fn : Callable , args : tuple , min_chunk_size : int ) -> int:
        logging.info("Tuning chunk size..." )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size : int ) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size )
                return True
            except RuntimeError:
                return False
        # Binary search for the largest candidate chunk size that still runs
        min_viable_chunk_size_index = 0
        i = len(candidates ) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i] )
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates ) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches( self : int , aa : Iterable , ab : Iterable ) -> bool:
        consistent = True
        for arg_a, arg_b in zip(aa , ab ):
            assert type(arg_a ) == type(arg_b )
            if isinstance(arg_a , (list, tuple) ):
                consistent &= self._compare_arg_caches(arg_a , arg_b )
            elif isinstance(arg_a , dict ):
                items_a = [v for _, v in sorted(arg_a.items() , key=lambda x : x[0] )]
                items_b = [v for _, v in sorted(arg_b.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(items_a , items_b )
            else:
                consistent &= arg_a == arg_b
        return consistent
    def tune_chunk_size( self : List[str] , representative_fn : Callable , args : tuple , min_chunk_size : int , ) -> int:
        consistent = True
        arg_data : tuple = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , args , object )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(arg_data )
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data )
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
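# A minimal usage sketch (hypothetical chunkable function and shapes, not from the
# original file): the tuner binary-searches the largest chunk size that runs without
# raising a RuntimeError (e.g. CUDA OOM) and caches the result until the argument
# shapes change:
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(
#       representative_fn=lambda x, chunk_size: x.clone(),  # stand-in for a real layer
#       args=(torch.zeros(64, 8),),
#       min_chunk_size=16,
#   )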
| 87 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self : Tuple ):
super().setUp()
# fmt: off
        vocab = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer( self : Optional[Any] , **kwargs : str ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self : Any , **kwargs : Union[str, Any] ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self : Optional[Any] , tokenizer : Dict ):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer( self : int ):
        tokenizer = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = """lower newer"""
        bpe_tokens = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@require_ftfy
    def test_check_encoding_slow_fast( self : Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                text = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
                tokens_s = tokenizer_s.tokenize(text )
                tokens_r = tokenizer_r.tokenize(text )
                self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCAmelCase = """xa\u0303y""" + """ """ + """x\xe3y"""
lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Test that the tokenization is identical on unicode of space type
lowerCAmelCase = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s , tokens_r )
# Test that the tokenization is identical on unicode of line break type
lowerCAmelCase = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq )
                    tokens_r = tokenizer_r.tokenize(unicode_seq )
                    self.assertListEqual(tokens_s , tokens_r )
def __lowercase ( self : Any ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase = f'''{text_of_1_token} {text_of_1_token}'''
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , )
lowerCAmelCase = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase ) + 1, len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
lowerCAmelCase = f''' {text}'''
lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase , use_fast=lowerCAmelCase , )
lowerCAmelCase = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase ) + 1, 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
def __lowercase ( self : Dict ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCAmelCase ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def __lowercase ( self : Optional[int] ):
super().test_tokenization_python_rust_equals()
def __lowercase ( self : Optional[int] ):
# CLIP always lower cases letters
pass
| 155 | 0 |
"""simple docstring"""
def decimal_isolate( number , digit_amount ) -> float:
    '''simple docstring'''
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
| 53 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = 'naver-clova-ix/donut-base'
class a__ ( unittest.TestCase ):
    def setUp( self : Optional[Any] ) -> Tuple:
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_tokenajson( self : Dict ) -> Union[str, Any]:
        expected_json = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
        sequence = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
        actual_json = self.processor.tokenajson(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 53 | 1 |
"""simple docstring"""
from __future__ import annotations
def bucket_sort( my_list : list ) -> list:
    """simple docstring"""
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count : int = int(max_value - min_value ) + 1
    buckets : list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
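# Roughly O(n + k) time and space for n values spanning a range of size
# k = max - min + 1 when the values are spread evenly (each bucket stays small);
# it degrades toward O(n log n) if many values land in the same bucket, since each
# bucket is sorted individually before concatenation.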
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 224 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self : List[Any] ):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self : Union[str, Any] ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 1_0_0_2 )
    def test_vocab_size( self : Optional[Any] ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
    def test_full_tokenizer( self : Optional[Any] ):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained( self : int ):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False )
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2 )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2 )
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2 )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key ) )
                shutil.rmtree(tmpdirname2 )
@cached_property
    def big_tokenizer( self : Optional[Any] ):
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
    def test_picklable( self : int ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
            pickle.loads(pickled_tokenizer )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCAmelCase_ : Dict = self.get_rust_tokenizer()
lowerCAmelCase_ : Tuple = 'I was born in 92000, and this is falsé.'
lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = self.get_rust_tokenizer()
lowerCAmelCase_ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
    def test_tokenization_base_easy_symbols( self : Any ):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self : Union[str, Any] ):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration( self : int ):
# fmt: off
lowerCAmelCase_ : List[str] = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
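# A small interactive sketch (an addition, not part of the test above) reproducing
# the "<unk>" behaviour the comments describe: the HF tokenizer maps the literal
# string "<unk>" to the single unk token id (3) instead of splitting it into
# "<", "unk", ">" the way raw SentencePiece/fairseq would. Assumes `transformers`
# and `sentencepiece` are installed and the Hub is reachable.
from transformers import XLMRobertaTokenizer

xlmr_tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
ids = xlmr_tokenizer.encode("<unk>", add_special_tokens=False)
print(ids)  # the unk id 3 should appear instead of separate ids for "<", "unk", ">"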
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
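# A minimal usage sketch (an addition; assumes `tensorflow` and `tensorflow-text`
# are installed) of the in-graph tokenizer pattern the tests above exercise: the
# tokenizer runs inside the TF graph, so it can be compiled with tf.function and
# exported together with the model.
import tensorflow as tf
from transformers import TFBertTokenizer

in_graph_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
sentences = tf.constant(["This is a straightforward English test sentence."])
encoded = in_graph_tokenizer(sentences)  # dict of int tensors: input_ids, attention_mask, ...
print({name: tensor.shape for name, tensor in encoded.items()})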
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
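# Quick sanity checks for the three variants above (an illustrative addition,
# using only functions defined in this snippet):
assert is_pangram("Pack my box with five dozen liquor jugs")
assert is_pangram_faster("Pack my box with five dozen liquor jugs")
assert not is_pangram_fastest("Hello world")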
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
lowerCamelCase_ = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ :
__magic_name__ = field(
default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
__magic_name__ = field(
default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , )
__magic_name__ = field(
default=10_24 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__magic_name__ = field(
default=__A , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
__magic_name__ = field(
default=__A , metadata={'''help''': '''A csv or a json file containing the training data.'''} )
__magic_name__ = field(
default=__A , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
__magic_name__ = field(default=__A , metadata={'''help''': '''A csv or a json file containing the test data.'''} )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." )
else:
UpperCAmelCase_ : Union[str, Any] = self.train_file.split("." )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
UpperCAmelCase_ : Any = self.validation_file.split("." )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class UpperCamelCase_ :
__magic_name__ = field(
default=__A , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__magic_name__ = field(
default=__A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__magic_name__ = field(
default=__A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__magic_name__ = field(
default=__A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__magic_name__ = field(
default=__A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
__magic_name__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__magic_name__ = field(
default=__A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
UpperCAmelCase_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(A__ )
datasets.utils.logging.set_verbosity(A__ )
transformers.utils.logging.set_verbosity(A__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase_ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCAmelCase_ : str = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
UpperCAmelCase_ : str = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
UpperCAmelCase_ : int = data_args.train_file.split("." )[-1]
UpperCAmelCase_ : Tuple = data_args.test_file.split("." )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
UpperCAmelCase_ : Optional[Any] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(F"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
UpperCAmelCase_ : Tuple = load_dataset("csv" ,data_files=A__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
UpperCAmelCase_ : Dict = load_dataset("json" ,data_files=A__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
UpperCAmelCase_ : Optional[int] = raw_datasets["train"].features["label"].names
UpperCAmelCase_ : int = len(A__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=A__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
UpperCAmelCase_ : List[str] = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=A__ ,)
UpperCAmelCase_ : Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=A__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
UpperCAmelCase_ : Union[str, Any] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
UpperCAmelCase_ : Optional[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
UpperCAmelCase_ : List[Any] = {"Refused": 0, "Entailed": 1}
UpperCAmelCase_ : Optional[Any] = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
UpperCAmelCase_ : Tuple = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(A__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(A__ ):
UpperCAmelCase_ : List[str] = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
UpperCAmelCase_ : Optional[Any] = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
UpperCAmelCase_ : str = examples["statement"]
UpperCAmelCase_ : int = list(map(_convert_table_text_to_pandas ,examples["table_text"] ) )
UpperCAmelCase_ : List[str] = tokenizer(A__ ,A__ ,padding=A__ ,max_length=A__ ,truncation=A__ )
UpperCAmelCase_ : Any = examples["label"]
return result
with training_args.main_process_first(desc="dataset map pre-processing" ):
UpperCAmelCase_ : Union[str, Any] = raw_datasets.map(
A__ ,batched=A__ ,load_from_cache_file=not data_args.overwrite_cache ,desc="Running tokenizer on dataset" ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
UpperCAmelCase_ : Tuple = raw_datasets["train"]
if data_args.max_train_samples is not None:
UpperCAmelCase_ : Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
UpperCAmelCase_ : Optional[int] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
UpperCAmelCase_ : Tuple = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
UpperCAmelCase_ : int = raw_datasets["test"]
if data_args.max_predict_samples is not None:
UpperCAmelCase_ : List[Any] = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(A__ ) ) ,3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(A__ ):
UpperCAmelCase_ : Dict = p.predictions[0] if isinstance(p.predictions ,A__ ) else p.predictions
UpperCAmelCase_ : Tuple = np.argmax(A__ ,axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
UpperCAmelCase_ : Optional[Any] = default_data_collator
elif training_args.fpaa:
UpperCAmelCase_ : int = DataCollatorWithPadding(A__ ,pad_to_multiple_of=8 )
else:
UpperCAmelCase_ : str = None
# Initialize our Trainer
UpperCAmelCase_ : List[Any] = Trainer(
model=A__ ,args=A__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=A__ ,tokenizer=A__ ,data_collator=A__ ,)
# Training
if training_args.do_train:
UpperCAmelCase_ : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase_ : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase_ : Tuple = last_checkpoint
UpperCAmelCase_ : Tuple = trainer.train(resume_from_checkpoint=A__ )
UpperCAmelCase_ : str = train_result.metrics
UpperCAmelCase_ : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(A__ )
)
UpperCAmelCase_ : Optional[Any] = min(A__ ,len(A__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" ,A__ )
trainer.save_metrics("train" ,A__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(eval_dataset=A__ )
UpperCAmelCase_ : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A__ )
UpperCAmelCase_ : Dict = min(A__ ,len(A__ ) )
trainer.log_metrics("eval" ,A__ )
trainer.save_metrics("eval" ,A__ )
if training_args.do_predict:
logger.info("*** Predict ***" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
UpperCAmelCase_ : Any = predict_dataset.remove_columns("label" )
UpperCAmelCase_ : Union[str, Any] = trainer.predict(A__ ,metric_key_prefix="predict" ).predictions
UpperCAmelCase_ : Union[str, Any] = np.argmax(A__ ,axis=1 )
UpperCAmelCase_ : Tuple = os.path.join(training_args.output_dir ,"predict_results_tabfact.txt" )
if trainer.is_world_process_zero():
with open(A__ ,"w" ) as writer:
logger.info("***** Predict Results *****" )
writer.write("index\tprediction\n" )
for index, item in enumerate(A__ ):
UpperCAmelCase_ : Optional[int] = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
UpperCAmelCase_ : List[Any] = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**A__ )
else:
trainer.create_model_card(**A__ )
def snake_case ( A__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
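# A standalone sketch (an addition, not part of the training script above) of
# what the preprocessing boils down to for one example: TapexTokenizer
# linearizes a pandas table together with the statement into a single input
# sequence. Assumes `transformers` and `pandas` are installed;
# "microsoft/tapex-base" is one public TAPEX checkpoint.
import pandas as pd
from transformers import TapexTokenizer

tapex_tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
table = pd.DataFrame.from_records([["beijing", "2008"], ["london", "2012"]], columns=["city", "year"])
enc = tapex_tokenizer(table=table, query="london hosted the games in 2012", return_tensors="pt")
print(enc["input_ids"].shape)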
"""simple docstring"""
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
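# Usage sketch for the helper above: look up an activation module by name and
# apply it to a tensor (assumes `torch` is installed).
import torch

activation = get_activation("gelu")
print(activation(torch.linspace(-2.0, 2.0, steps=5)))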
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Remove duplicate initializers from an ONNX model to reduce its size."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
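# Usage sketch for the helper above (an addition); the ONNX path comes from the
# command line, so nothing is assumed about your filesystem:
if __name__ == "__main__":
    import sys

    if len(sys.argv) > 1:
        print("optimized model written to:", remove_dup_initializers(sys.argv[1]))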
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> "PretrainedConfig":
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
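# Usage sketch against the upstream equivalent of the class above (assumes
# `transformers` is installed; Bert/GPT-2 configs stand in for any pair):
from transformers import BertConfig, EncoderDecoderConfig, GPT2Config

composite = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), GPT2Config())
print(composite.decoder.is_decoder, composite.decoder.add_cross_attention)  # True True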
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1_024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
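# A hedged companion sketch of the server side this client assumes: something
# listening on the same host/port that reads the greeting and streams a file
# back. This is an illustration added here, not part of the original snippet;
# "send_me.txt" is a placeholder filename.
def serve_file(filename: str = "send_me.txt", port: int = 12_312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1_024))  # the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1_024):
            conn.send(chunk)
    conn.close()
    server.close()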
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Dict = '''Hello, World!'''
__lowerCamelCase : Optional[Any] = '''en_XX'''
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : bool ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Path("""data_bin""" )
SCREAMING_SNAKE_CASE__ = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(__UpperCamelCase ).parent ) , checkpoint_file=Path(__UpperCamelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(__UpperCamelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(__UpperCamelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , __UpperCamelCase )
SCREAMING_SNAKE_CASE__ = XmodForSequenceClassification(__UpperCamelCase ) if classification_head else XmodForMaskedLM(__UpperCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layernorm_embedding.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layers[i]
# self attention
SCREAMING_SNAKE_CASE__ = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn.out_proj.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.self_attn_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.fca.bias
SCREAMING_SNAKE_CASE__ = xmod_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
SCREAMING_SNAKE_CASE__ = xmod_layer.adapter_layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
SCREAMING_SNAKE_CASE__ = bert_output.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE__ = xmod_layer.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE__ = from_adapter.fca.weight
SCREAMING_SNAKE_CASE__ = from_adapter.fca.bias
SCREAMING_SNAKE_CASE__ = from_adapter.fca.weight
SCREAMING_SNAKE_CASE__ = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod_sent_encoder.layer_norm.bias
if classification_head:
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ = xmod.encode(__UpperCamelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ = model(__UpperCamelCase )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ = xmod.model.classification_heads["""mnli"""](xmod.extract_features(__UpperCamelCase ) )
else:
SCREAMING_SNAKE_CASE__ = xmod.model(__UpperCamelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE__ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ = torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(__UpperCamelCase ).mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowerCamelCase : str = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
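# A short follow-up sketch (an addition, not part of the conversion script)
# showing how the converted checkpoint could be used afterwards; the dump folder
# path is a placeholder for whatever was passed as --pytorch_dump_folder_path.
from transformers import XmodForMaskedLM

xmod_model = XmodForMaskedLM.from_pretrained("path/to/pytorch_dump_folder")
xmod_model.roberta.set_default_language("en_XX")  # mirrors the check done during conversion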
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[Any] ={
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __A ( lowerCamelCase_ ):
a__ : int = '''blenderbot-small'''
a__ : str = ['''past_key_values''']
a__ : int = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__(self : Any , __a : Union[str, Any]=50265 , __a : Optional[int]=512 , __a : Optional[Any]=8 , __a : Tuple=2048 , __a : Union[str, Any]=16 , __a : Union[str, Any]=8 , __a : int=2048 , __a : Optional[int]=16 , __a : List[Any]=0.0 , __a : List[str]=0.0 , __a : Optional[Any]=True , __a : Dict=True , __a : int="gelu" , __a : Optional[int]=512 , __a : str=0.1 , __a : int=0.0 , __a : str=0.0 , __a : Any=0.02 , __a : int=1 , __a : List[Any]=False , __a : List[str]=0 , __a : List[Any]=1 , __a : List[Any]=2 , __a : Dict=2 , **__a : Optional[int] , ):
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = d_model
UpperCAmelCase_ = encoder_ffn_dim
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = encoder_attention_heads
UpperCAmelCase_ = decoder_ffn_dim
UpperCAmelCase_ = decoder_layers
UpperCAmelCase_ = decoder_attention_heads
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = init_std
UpperCAmelCase_ = encoder_layerdrop
UpperCAmelCase_ = decoder_layerdrop
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = encoder_layers
UpperCAmelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
class __A ( lowerCamelCase_ ):
@property
def _lowercase (self : Optional[int] ):
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase_ = {0: "batch"}
UpperCAmelCase_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "decoder_sequence"}
UpperCAmelCase_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
for i in range(__snake_case ):
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
else:
UpperCAmelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _lowercase (self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = super().outputs
else:
UpperCAmelCase_ = super(__snake_case , self ).outputs
if self.use_past:
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
for i in range(__snake_case ):
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
UpperCAmelCase_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _lowercase (self : List[Any] , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ):
UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Generate decoder inputs
UpperCAmelCase_ = seq_length if not self.use_past else 1
UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
UpperCAmelCase_ = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase_ = dict(**__snake_case , **__snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs["input_ids"].shape
UpperCAmelCase_ = common_inputs["decoder_input_ids"].shape[1]
UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads
UpperCAmelCase_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ = decoder_seq_length + 3
UpperCAmelCase_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase_ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__snake_case , __snake_case )] , dim=1 )
UpperCAmelCase_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
UpperCAmelCase_ = min(__snake_case , __snake_case )
UpperCAmelCase_ = max(__snake_case , __snake_case ) - min_num_layers
UpperCAmelCase_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__snake_case ):
common_inputs["past_key_values"].append(
(
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
torch.zeros(__snake_case ),
) )
# TODO: test this.
UpperCAmelCase_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__snake_case , __snake_case ):
common_inputs["past_key_values"].append((torch.zeros(__snake_case ), torch.zeros(__snake_case )) )
return common_inputs
def _lowercase (self : Dict , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ):
UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCAmelCase_ = seqlen + 2
UpperCAmelCase_ , UpperCAmelCase_ = self.num_layers
UpperCAmelCase_ , UpperCAmelCase_ = self.num_attention_heads
UpperCAmelCase_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase_ = common_inputs["attention_mask"].dtype
UpperCAmelCase_ = torch.cat(
[common_inputs["attention_mask"], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 )
UpperCAmelCase_ = [
(torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(__snake_case )
]
return common_inputs
def _lowercase (self : List[Any] , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase_ = compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase_ = tokenizer.num_special_tokens_to_add(__snake_case )
UpperCAmelCase_ = compute_effective_axis_dimension(
__snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase_ = dict(tokenizer(__snake_case , return_tensors=__snake_case ) )
return common_inputs
def _lowercase (self : str , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
elif self.task == "causal-lm":
UpperCAmelCase_ = self._generate_dummy_inputs_for_causal_lm(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
else:
UpperCAmelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
return common_inputs
def _lowercase (self : List[str] , __a : Union[str, Any] , __a : List[str] , __a : List[str] , __a : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase_ = super()._flatten_past_key_values_(__snake_case , __snake_case , __snake_case , __snake_case )
else:
UpperCAmelCase_ = super(__snake_case , self )._flatten_past_key_values_(
__snake_case , __snake_case , __snake_case , __snake_case )
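# A hedged sketch of exercising an ONNX config like the one above. The class
# names here are an assumption about the upstream source (transformers'
# BlenderbotSmallConfig / BlenderbotSmallOnnxConfig); in this snippet they are
# obfuscated, so adjust the imports if your copy differs.
from transformers import AutoTokenizer, BlenderbotSmallConfig
from transformers.models.blenderbot_small.configuration_blenderbot_small import BlenderbotSmallOnnxConfig

bbs_config = BlenderbotSmallConfig()
onnx_cfg = BlenderbotSmallOnnxConfig(bbs_config, task="default")
bbs_tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
dummy_inputs = onnx_cfg.generate_dummy_inputs(bbs_tokenizer, batch_size=2, seq_length=8)
print(sorted(dummy_inputs.keys()))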
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
a__ : Any = ["""pixel_values"""]
def __init__(self : Any , __a : bool = True , __a : Optional[Dict[str, int]] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : Dict[str, int] = None , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : Dict , ):
super().__init__(**__a )
UpperCAmelCase_ = size if size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(__a )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(__a , default_to_square=__a , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _lowercase (self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ):
UpperCAmelCase_ = get_size_dict(__a )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(__a , size=size["shortest_edge"] , default_to_square=__a )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f"""Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}""" )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def _lowercase (self : List[Any] , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : int , ):
UpperCAmelCase_ = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__a , size=(size["height"], size["width"]) , data_format=__a , **__a )
def _lowercase (self : str , __a : np.ndarray , __a : float , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def _lowercase (self : Any , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Dict , ):
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def _lowercase (self : Dict , __a : ImageInput , __a : Optional[bool] = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : Optional[bool] = None , __a : Optional[float] = None , __a : Optional[bool] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__a : Dict , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(__a , param_name="crop_size" , default_to_square=__a )
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(__a )
if not is_batched(__a ):
UpperCAmelCase_ = [images]
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__a ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
UpperCAmelCase_ = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__a , __a ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=__a , tensor_type=__a )
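# The class above ships with obfuscated method names, so as written it is not
# directly runnable; its defaults (224x224 resize and crop, IMAGENET mean/std,
# 1/255 rescale) follow transformers' standard image-processor API. A usage
# sketch against one real processor with that API (the exact upstream class for
# this snippet is an assumption), with a random array standing in for an image:
import numpy as np
from transformers import EfficientFormerImageProcessor  # assumed upstream class

image_processor = EfficientFormerImageProcessor()
fake_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
batch = image_processor(images=fake_image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)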